| code (string, lengths 81-54k) | code_codestyle (int64, 0-721) | style_context (string, lengths 91-41.9k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
def capitalize_each_alpha(txt: str) -> list:
    """Return every variant of `txt` with exactly one alphabetic character uppercased.

    >>> capitalize_each_alpha("hello")
    ['Hello', 'hEllo', 'heLlo', 'helLo', 'hellO']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
| 55
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 55
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image`, which should be the original image, and `label`, which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits: positive -> 1, non-positive -> 0
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
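# A minimal usage sketch (assumes the transformers agents/tools runtime and a local
# image file; `PipelineTool.__call__` chains encode -> forward -> decode as defined above,
# so the tool is simply called with the image and the label):
#
#     from PIL import Image
#     tool = ImageSegmentationTool()
#     image = Image.open("cat.png")
#     mask = tool(image, "cat")  # returns a PIL image containing the binary mask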
| 55
|
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value of the Gaussian (normal) probability density at ``x``.

    >>> float(gaussian(0))
    0.3989422804014327
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 1
|
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine, via collections.Counter, whether the string can be rearranged into a palindrome.

    >>> can_string_be_rearranged_as_palindrome_counter("Momo")
    True
    >>> can_string_be_rearranged_as_palindrome_counter("Mother")
    False
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine, via a hand-rolled frequency dict, whether the string can be rearranged into a palindrome.

    A string can be rearranged into a palindrome when at most one character occurs
    an odd number of times (spaces and case are ignored).

    >>> can_string_be_rearranged_as_palindrome("Momo")
    True
    >>> can_string_be_rearranged_as_palindrome("Mother")
    False
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark the two implementations against each other."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 55
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
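# How the lazy init behaves in practice (a sketch; assumes `transformers` is installed):
# importing the package stays cheap, and a heavy submodule is only imported when an
# attribute listed in `_import_structure` is first accessed.
#
#     from transformers.models import bert
#     config_cls = bert.BertConfig  # triggers the import of configuration_bert
#     model_cls = bert.BertModel    # triggers the import of modeling_bert (needs torch)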
| 55
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_VOCAB = get_tests_dir('fixtures/vocab.json')
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir('fixtures')
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")
            processor = AutoProcessor.from_pretrained(tmpdirname)
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 55
| 1
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
# Dummy contexts to test `ContextManagers`
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 55
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
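# A minimal concrete command (a sketch; the real CLI subcommands follow this shape,
# but `HelloCommand` and its behavior below are illustrative only):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello from the CLI")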
| 55
| 1
|
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
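# A usage sketch (illustrative; in the hosting pipeline the checker receives the
# CLIP-preprocessed pixel values plus the raw numpy images, and any flagged image
# comes back blacked out):
#
#     safety_checker = IFSafetyChecker(clip_config)
#     images, nsfw, watermark = safety_checker(clip_input=pixel_values, images=np_images)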
| 55
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for object detection head model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 55
| 1
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """
    Advance ``a_i`` (the digit array of the current term, least-significant digit
    first) toward term ``n`` using cached jumps, returning the total amount added
    and the number of terms skipped.
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            jumps = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """Advance the sequence term by term, mutating ``a_i`` in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits: list[int], k: int, addend: int) -> None:
    """Add ``addend`` to the digit array ``digits``, starting at index ``k``."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """
    Return the n-th term of the sequence a(1) = 1, a(n) = a(n-1) + digitsum(a(n-1)),
    which starts 1, 2, 4, 8, 16, 23, 28, 38, 49, ... (Project Euler 551).
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
|
def fibonacci(n: int) -> int:
    """Compute the n-th Fibonacci number iteratively (with F(1) = 0, F(2) = 1).

    >>> fibonacci(12)
    144
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least ``n`` digits.

    >>> fibonacci_digits_index(3)
    12
    """
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first ``n``-digit Fibonacci number."""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 1
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
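
# Usage sketch (illustrative; assumes a local `train.parquet` file exists): this
# builder is what `datasets.load_dataset` dispatches to for the packaged
# "parquet" loader type.
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "train.parquet"})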
# The sequence computed below is a(1) = 1, a(n) = a(n-1) + digit_sum(a(n-1)).
# The current term is stored as a little-endian digit list: digits[0] is the
# least significant digit.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Advance a_i (in place) to either the n-th term or the smallest term for
    which the low k digits overflow, returning (difference added to the term,
    number of terms jumped). Uses cached jumps keyed by digitsum(b) and c,
    where a_i = b * 10^k + c.
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Like next_term, but advances term by term without using cached jumps."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` to the little-endian digit list, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence, computed via cached digit jumps."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        _diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
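
# Cross-check sketch (not part of the original solution): a brute-force version
# of the same recurrence. Only practical for small n, but handy for validating
# `solution` on small inputs.
def brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a
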
if __name__ == "__main__":
    print(f"{solution() = }")
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """
    Scrape Amazon search results for `product` and return the title, link,
    price, rating, MRP and discount of each result as a pandas DataFrame.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # skip malformed entries instead of reusing stale values
            continue
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "Discount",
    ] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow. Slow tests are skipped by default;
    set the RUN_SLOW environment variable to a truthy value to run them."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    """Decorator marking a test that must be run on the CPU only."""
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    """Decorator marking a test that requires CUDA."""
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    """Decorator marking that a test requires at least the given torch version,
    e.g. `@require_torch_min_version(version="1.12")`."""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
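
# Illustrative usage of the markers above (a hypothetical test, shown as a
# comment so this module stays import-only): `require_cuda` skips the class on
# machines without a GPU, and `slow` additionally gates the test behind the
# RUN_SLOW environment variable.
#
#     @require_cuda
#     class ExampleGpuTest(unittest.TestCase):
#         @slow
#         def test_big_matmul(self):
#             x = torch.randn(1024, 1024, device="cuda")
#             self.assertEqual((x @ x).shape, (1024, 1024))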
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for
    the duration of the class, wipes its contents between tests (unless
    `clear_on_setup` is False), and removes it at the end.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state singletons between tests."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """
    A TestCase class for registering mocks that should apply to every test;
    mocks registered through `add_mocks` are stopped automatically on tearDown.
    """

    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and returns the stdout if requested.
    Properly captures and raises a `SubprocessCallException` if an error occurs while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
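
# With the lazy structure above, importing this package is cheap: an import such
# as `from transformers.models.whisper import WhisperModel` only materializes
# `modeling_whisper` (and therefore torch) on first attribute access.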
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ('j' is folded into 'i')."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) indexes of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based (index1, index2) in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of `message` (spaces removed, 'j' -> 'i')."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of `message`."""
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
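
# Example usage (illustrative): the cipher round-trips any message once spaces
# are removed and 'j' is folded into 'i'.
if __name__ == "__main__":
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    print(encoded)
    print(cipher.decode(encoded))  # -> "testmessage"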
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
        )
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

                processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
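
# Typical entry points (illustrative; checkpoints are hosted under the
# `kandinsky-community` organization on the Hub):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")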
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
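
# Equivalent closed form (an addition, not in the original): inclusion-exclusion
# over arithmetic series, where sum_div(k) is the sum of the multiples of k
# strictly below n.
def solution_closed_form(n: int = 1000) -> int:
    def sum_div(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return sum_div(3) + sum_div(5) - sum_div(15)
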
if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
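
# Launch sketch (illustrative; the file name is an assumption, see the
# accelerate examples README linked in the header for the full instructions):
#
#     accelerate launch multi_process_metrics.py --mixed_precision fp16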
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = "TensorFlow"
@property
def __snake_case ( self ):
return tf.__version__
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# initialize GPU on separate process
A__ : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
A__ : str = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_speed(_inference )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
A__ : Dict = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_speed(_train )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ )
A__ : int = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
A__ : List[Any] = self._prepare_inference_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_memory(_inference )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase__ )
A__ : List[Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
A__ : str = self._prepare_train_func(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self._measure_memory(_train )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
A__ : Union[str, Any] = (
hasattr(UpperCamelCase__ , '''architectures''' )
and isinstance(config.architectures , UpperCamelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A__ : Optional[int] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
A__ : int = __import__('''transformers''' , fromlist=[model_class] )
A__ : List[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ )
A__ : Any = model_cls(UpperCamelCase__ )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
A__ : Tuple = TF_MODEL_MAPPING[config.__class__](UpperCamelCase__ )
# encoder-decoder has vocab size saved differently
A__ : List[str] = config.vocab_size if hasattr(UpperCamelCase__ , '''vocab_size''' ) else config.encoder.vocab_size
A__ : List[str] = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , training=UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase__ , training=UpperCamelCase__ )
A__ : Optional[int] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
A__ : Any = (
hasattr(UpperCamelCase__ , '''architectures''' )
and isinstance(config.architectures , UpperCamelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A__ : List[Any] = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
A__ : Tuple = __import__('''transformers''' , fromlist=[model_class] )
A__ : str = getattr(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[int] = model_cls(UpperCamelCase__ )
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
A__ : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase__ )
# encoder-decoder has vocab size saved differently
A__ : Tuple = config.vocab_size if hasattr(UpperCamelCase__ , '''vocab_size''' ) else config.encoder.vocab_size
A__ : str = random_input_ids(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
A__ : Union[str, Any] = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0]
A__ : Union[str, Any] = tf.gradients(UpperCamelCase__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
A__ : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )[0]
A__ : Dict = tf.gradients(UpperCamelCase__ , model.trainable_variables )
return gradients
A__ : Optional[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __snake_case ( self , UpperCamelCase__ ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(UpperCamelCase__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
A__ : Dict = timeit.repeat(
UpperCamelCase__ , repeat=self.args.repeat , number=10 , )
return min(UpperCamelCase__ ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
def __snake_case ( self , UpperCamelCase__ ):
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
A__ : Optional[Any] = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
A__ : int = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
A__ : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
A__ : str = nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase__ )
A__ : Any = meminfo.used
A__ : int = Memory(UpperCamelCase__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
A__ : Any = None
else:
A__ : Optional[int] = measure_peak_memory_cpu(UpperCamelCase__ )
A__ : Dict = Memory(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
A__ : str = stop_memory_tracing(UpperCamelCase__ )
if memory is None:
A__ : Tuple = summary.total
else:
A__ : Optional[int] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn't fit on GPU. {e}" )
return "N/A", None
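# A minimal, self-contained sketch of the timing convention used above:
# `timeit.repeat` returns one total per repeat, and the minimum (not the
# average) is the least noise-affected estimate, so the per-call time is
# reported as min(runtimes) / number. The names here are illustrative only.
import timeit

def measure_speed(func, repeat=3, number=10):
    runtimes = timeit.repeat(func, repeat=repeat, number=number)
    return min(runtimes) / number

print(measure_speed(lambda: sum(range(1000))))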
| 55
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=UpperCamelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : Dict = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.post_processor(UpperCamelCase__ ).cpu().detach()
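# A sketch of the same text-to-speech flow without the tool wrapper, using the
# public SpeechT5 checkpoints referenced above; the mangled class names in the
# tool stand in for transformers' SpeechT5Processor, SpeechT5ForTextToSpeech
# and SpeechT5HifiGan.
import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, world.", return_tensors="pt")
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker = torch.tensor(embeddings[7305]["xvector"]).unsqueeze(0)
with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)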
| 55
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Optional[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
A__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A__ : str = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
A__ : Dict = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
A__ : Optional[int] = tempfile.mkdtemp()
A__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A__ : str = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
# load decoder from hub
A__ : Any = '''hf-internal-testing/ngram-beam-search-decoder'''
def __snake_case ( self , **UpperCamelCase__ ):
A__ : int = self.add_kwargs_tokens_map.copy()
kwargs.update(UpperCamelCase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **UpperCamelCase__ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
A__ : Any = self.get_tokenizer()
A__ : Any = self.get_feature_extractor()
A__ : List[str] = self.get_decoder()
A__ : Dict = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
A__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ):
A__ : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(UpperCamelCase__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=UpperCamelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ):
A__ : str = self.get_feature_extractor()
A__ : Dict = self.get_tokenizer()
A__ : str = self.get_decoder()
A__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Tuple = floats_list((3, 1000) )
A__ : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' )
A__ : int = processor(UpperCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ):
A__ : List[Any] = self.get_feature_extractor()
A__ : Optional[Any] = self.get_tokenizer()
A__ : List[Any] = self.get_decoder()
A__ : Dict = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Union[str, Any] = '''This is a test string'''
A__ : Optional[Any] = processor(text=UpperCamelCase__ )
A__ : str = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self , UpperCamelCase__=(2, 10, 16) , UpperCamelCase__=77 ):
np.random.seed(UpperCamelCase__ )
return np.random.rand(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[int] = self.get_feature_extractor()
A__ : Dict = self.get_tokenizer()
A__ : Optional[Any] = self.get_decoder()
A__ : int = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Any = self._get_dummy_logits(shape=(10, 16) , seed=13 )
A__ : str = processor.decode(UpperCamelCase__ )
A__ : List[Any] = decoder.decode_beams(UpperCamelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = self.get_feature_extractor()
A__ : int = self.get_tokenizer()
A__ : int = self.get_decoder()
A__ : str = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A__ : Tuple = processor.batch_decode(UpperCamelCase__ )
else:
with get_context(UpperCamelCase__ ).Pool() as pool:
A__ : List[Any] = processor.batch_decode(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = list(UpperCamelCase__ )
with get_context('''fork''' ).Pool() as p:
A__ : str = decoder.decode_beams_batch(UpperCamelCase__ , UpperCamelCase__ )
A__ , A__ , A__ : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCamelCase__ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(UpperCamelCase__ , decoded_processor.logit_score )
self.assertListEqual(UpperCamelCase__ , decoded_processor.lm_score )
def __snake_case ( self ):
A__ : List[Any] = self.get_feature_extractor()
A__ : List[str] = self.get_tokenizer()
A__ : Dict = self.get_decoder()
A__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Optional[int] = self._get_dummy_logits()
A__ : Dict = 15
A__ : Union[str, Any] = -2_0.0
A__ : List[Any] = -4.0
A__ : List[Any] = processor.batch_decode(
UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , )
A__ : Optional[Any] = decoded_processor_out.text
A__ : str = list(UpperCamelCase__ )
with get_context('''fork''' ).Pool() as pool:
A__ : Dict = decoder.decode_beams_batch(
UpperCamelCase__ , UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , )
A__ : List[str] = [d[0][0] for d in decoded_decoder_out]
A__ : Any = [d[0][2] for d in decoded_decoder_out]
A__ : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , UpperCamelCase__ )
self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , UpperCamelCase__ , atol=1e-3 ) )
self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , UpperCamelCase__ , atol=1e-3 ) )
def __snake_case ( self ):
A__ : Optional[int] = self.get_feature_extractor()
A__ : Tuple = self.get_tokenizer()
A__ : Optional[int] = self.get_decoder()
A__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : int = self._get_dummy_logits()
A__ : Optional[Any] = 2.0
A__ : Union[str, Any] = 5.0
A__ : Tuple = -2_0.0
A__ : Tuple = True
A__ : int = processor.batch_decode(
UpperCamelCase__ , alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , )
A__ : Any = decoded_processor_out.text
A__ : int = list(UpperCamelCase__ )
decoder.reset_params(
alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , )
with get_context('''fork''' ).Pool() as pool:
A__ : Optional[int] = decoder.decode_beams_batch(
UpperCamelCase__ , UpperCamelCase__ , )
A__ : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , UpperCamelCase__ )
A__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Any = processor.decoder.model_container[processor.decoder._model_key]
A__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A__ : Optional[Any] = os.listdir(UpperCamelCase__ )
A__ : Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
A__ : Dict = WavaVecaProcessorWithLM.from_pretrained(UpperCamelCase__ )
A__ : List[str] = processor.decoder.model_container[processor.decoder._model_key]
A__ : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A__ : Tuple = os.listdir(UpperCamelCase__ )
A__ : int = os.listdir(UpperCamelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Any = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Dict = floats_list((3, 1000) )
A__ : List[Any] = processor_wavaveca(UpperCamelCase__ , return_tensors='''np''' )
A__ : Tuple = processor_auto(UpperCamelCase__ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
A__ : int = self._get_dummy_logits()
A__ : str = processor_wavaveca.batch_decode(UpperCamelCase__ )
A__ : Union[str, Any] = processor_auto.batch_decode(UpperCamelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ):
A__ : Any = self.get_feature_extractor()
A__ : List[str] = self.get_tokenizer()
A__ : int = self.get_decoder()
A__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self ):
A__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : str = self._get_dummy_logits()[0]
A__ : str = processor.decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __snake_case ( self ):
A__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Optional[int] = self._get_dummy_logits()
A__ : Any = processor.batch_decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ):
import torch
A__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=UpperCamelCase__ )
A__ : str = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) )
A__ : int = iter(UpperCamelCase__ )
A__ : Dict = next(UpperCamelCase__ )
A__ : str = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
A__ : Optional[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A__ : Union[str, Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
A__ : Union[str, Any] = model(UpperCamelCase__ ).logits.cpu().numpy()
A__ : Tuple = processor.decode(logits[0] , output_word_offsets=UpperCamelCase__ )
A__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A__ : List[Any] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
A__ : Union[str, Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) , UpperCamelCase__ )
self.assertEqual(''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) , output.text )
# output times
A__ : List[str] = torch.tensor(self.get_from_offsets(UpperCamelCase__ , '''start_time''' ) )
A__ : Tuple = torch.tensor(self.get_from_offsets(UpperCamelCase__ , '''end_time''' ) )
# fmt: off
A__ : str = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
A__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.0_1 ) )
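# Illustration of the offset-to-seconds conversion exercised in the slow test
# above. The downsampling factor of 320 is an assumption about the Wav2Vec2
# base checkpoint's convolutional stack, not a value read from any config.
inputs_to_logits_ratio = 320
sampling_rate = 16_000
time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per logit frame
assert abs(71 * time_offset - 1.42) < 1e-9  # frame 71 maps to roughly 1.42 s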
| 55
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
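# A quick round trip with the fast tokenizer above (its mangled class name
# stands in for GPTNeoXTokenizerFast); byte-level BPE should decode losslessly.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tok("Hello world").input_ids
print(tok.decode(ids))  # expected: "Hello world"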
| 55
| 1
|
from math import factorial
_SCREAMING_SNAKE_CASE : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 60 , __UpperCamelCase : int = 1_00_00_00 ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
A__ : List[Any] = 0
# the cached sizes of the previous chains
A__ : dict[int, int] = {}
for start_chain_element in range(1 , __UpperCamelCase ):
# The temporary set will contain the elements of the chain
A__ : List[Any] = set()
A__ : int = 0
        # Stop computing the chain when you find a cached size, a repeating item,
        # or the length is greater than the desired one.
A__ : Optional[int] = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__UpperCamelCase )
chain_set_length += 1
A__ : List[str] = digit_factorial_sum(__UpperCamelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
A__ : Dict = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
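# A self-contained check of the chain arithmetic above (the mangled function
# names prevent calling the originals directly): 145 is a fixed point of the
# digit-factorial map, and 169 -> 363601 -> 1454 -> 169 forms a 3-cycle.
from math import factorial

def digit_factorial_sum(n: int) -> int:
    return sum(factorial(int(d)) for d in str(n))

assert digit_factorial_sum(145) == 145        # 1! + 4! + 5! = 145
assert digit_factorial_sum(169) == 363_601    # 1! + 6! + 9!
assert digit_factorial_sum(363_601) == 1_454
assert digit_factorial_sum(1_454) == 169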
| 55
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
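# A hypothetical round trip, assuming the class above is transformers'
# DeformableDetrConfig; the overridden to_dict() above injects `model_type`.
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(num_queries=100, two_stage=False)
d = config.to_dict()
assert d["model_type"] == "deformable_detr"
assert d["num_queries"] == 100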
| 55
| 1
|
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ = 14 ):
if group not in primes:
raise ValueError('''Unsupported Group''' )
A__ : List[str] = primes[group]['''prime''']
A__ : Tuple = primes[group]['''generator''']
A__ : Optional[int] = int(hexlify(urandom(32 ) ) , base=16 )
def __snake_case ( self ):
return hex(self.__private_key )[2:]
def __snake_case ( self ):
A__ : Optional[Any] = pow(self.generator , self.__private_key , self.prime )
return hex(UpperCamelCase__ )[2:]
def __snake_case ( self , UpperCamelCase__ ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(UpperCamelCase__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = int(UpperCamelCase__ , base=16 )
if not self.is_valid_public_key(UpperCamelCase__ ):
raise ValueError('''Invalid public key''' )
A__ : List[Any] = pow(UpperCamelCase__ , self.__private_key , self.prime )
return shaaaa(str(UpperCamelCase__ ).encode() ).hexdigest()
@staticmethod
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(UpperCamelCase__ , (prime - 1) // 2 , UpperCamelCase__ ) == 1
)
@staticmethod
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 14 ):
A__ : Optional[Any] = int(UpperCamelCase__ , base=16 )
A__ : Any = int(UpperCamelCase__ , base=16 )
A__ : int = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''Invalid public key''' )
A__ : Union[str, Any] = pow(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return shaaaa(str(UpperCamelCase__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
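# A self-contained toy version of the exchange implemented above, with a tiny
# prime for clarity (real use takes the RFC 3526 groups in `primes`). Both
# parties derive the same hashed shared secret, as in the class methods above.
from hashlib import sha256
from secrets import randbelow

prime, generator = 23, 5
a_priv = 2 + randbelow(prime - 4)
b_priv = 2 + randbelow(prime - 4)
a_pub = pow(generator, a_priv, prime)
b_pub = pow(generator, b_priv, prime)
a_shared = sha256(str(pow(b_pub, a_priv, prime)).encode()).hexdigest()
b_shared = sha256(str(pow(a_pub, b_priv, prime)).encode()).hexdigest()
assert a_shared == b_shared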
| 55
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for every k in [i, len(p)).
            # These are all inversions. The claim follows from the
            # property that p is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
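# A compact, runnable version of the O(n log n) count above (the mangled
# function names make the originals uncallable); it reproduces the 8
# inversions of the test array used in the checks above.
def count_inversions(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    left, a = count_inversions(arr[:mid])
    right, b = count_inversions(arr[mid:])
    merged, cross, i, j = [], 0, 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            cross += len(left) - i  # every remaining left element exceeds right[j]
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, a + b + cross

assert count_inversions([10, 2, 1, 5, 5, 2, 11])[1] == 8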
| 55
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = ["speech"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
requires_backends(self , ['''speech'''] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = ["speech"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
requires_backends(self , ['''speech'''] )
| 55
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
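# Pillow's Image.point applies the callback to every channel value (building
# a lookup table over 0-255 for "L" images); a quick self-contained check of
# the same c + level mapping used above:
from PIL import Image

gray = Image.new("L", (2, 2), color=100)
brighter = gray.point(lambda c: c + 100)
assert list(brighter.getdata()) == [200, 200, 200, 200]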
| 55
| 1
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 55
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
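# The block above defers the heavy torch-dependent imports via transformers'
# _LazyModule. A simplified stand-in for the same idea uses module-level
# __getattr__ (PEP 562); the mapping below is illustrative only, and this
# sketch is meant to live in a package __init__, not a standalone script.
import importlib

_LAZY_ATTRS = {"AutoformerConfig": "configuration_autoformer"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")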
| 55
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?",\n        ...     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     normalized=True,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?",\n        ...     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : Any = TypeVar('T')
_SCREAMING_SNAKE_CASE : Optional[int] = TypeVar('U')
class UpperCamelCase__ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = key
A__ : Optional[int] = val
A__ : int = None
A__ : List[Any] = None
def __repr__( self ):
return (
F"Node: key: {self.key}, val: {self.val}, "
F"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class UpperCamelCase__ ( Generic[T, U] ):
'''simple docstring'''
def __init__( self ):
        A__ : Dict = DoubleLinkedListNode(None , None )
        A__ : Tuple = DoubleLinkedListNode(None , None )
A__ , A__ : Tuple = self.rear, self.head
def __repr__( self ):
A__ : str = ['''DoubleLinkedList''']
A__ : List[str] = self.head
while node.next is not None:
            rep.append(str(node ) )
A__ : Any = node.next
rep.append(str(self.rear ) )
        return ",\n    ".join(rep )
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
A__ : List[str] = node
A__ : Optional[Any] = previous
A__ : Optional[Any] = node
A__ : str = self.rear
def __snake_case ( self , UpperCamelCase__ ):
if node.prev is None or node.next is None:
return None
A__ : Dict = node.next
A__ : int = node.prev
A__ : List[Any] = None
A__ : Dict = None
return node
class UpperCamelCase__ ( Generic[T, U] ):
'''simple docstring'''
_lowerCAmelCase = {}
def __init__( self , UpperCamelCase__ ):
A__ : Optional[Any] = DoubleLinkedList()
A__ : Optional[Any] = capacity
A__ : Optional[Any] = 0
A__ : Tuple = 0
A__ : Tuple = 0
A__ : List[Any] = {}
def __repr__( self ):
return (
F"CacheInfo(hits={self.hits}, misses={self.miss}, "
F"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self , UpperCamelCase__ ):
return key in self.cache
def __snake_case ( self , UpperCamelCase__ ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
A__ : Union[str, Any] = self.cache[key]
A__ : List[str] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
            self.list.add(node )
return node.val
self.miss += 1
return None
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
A__ : List[str] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
            A__ : int = DoubleLinkedListNode(key , value )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
A__ : List[str] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
A__ : List[str] = value
            self.list.add(node )
@classmethod
def __snake_case ( cls , UpperCamelCase__ = 128 ):
def cache_decorator_inner(UpperCamelCase__ ) -> Callable[..., U]:
def cache_decorator_wrapper(*UpperCamelCase__ ) -> U:
if func not in cls.decorator_function_to_instance_map:
                    A__ : int = LRUCache(size )
                A__ : str = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    A__ : Union[str, Any] = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0] , result )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(A__ , '''cache_info''' , A__ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
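# A minimal usage sketch (commented out so the module stays side-effect free):
# the decorator memoizes a one-argument function; the cache key is the first
# positional argument.
#
# @LRUCache.decorator(100)
# def fib(num):
#     if num in (1, 2):
#         return 1
#     return fib(num - 1) + fib(num - 2)
#
# fib(100)
# print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, ...)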
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    '''simple docstring'''
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: Optional["TrunkConfig"] = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    '''simple docstring'''
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: Optional["StructureModuleConfig"] = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    '''simple docstring'''
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    """simple docstring"""
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
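# A minimal construction sketch (hypothetical values, commented out): build a
# folding-model config and round-trip it; the nested EsmFoldConfig/TrunkConfig
# defaults are filled in automatically by the __post_init__ hooks above.
#
# config = EsmConfig(vocab_size=33, is_folding_model=True)
# assert config.to_dict()["esmfold_config"]["trunk"]["num_blocks"] == 48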
| 55
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors=None, **kwargs):
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes,
        )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )
        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        # pad shorter point lists with the sentinel pad value so all arrays stack
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
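# A minimal usage sketch (hypothetical checkpoint and inputs, commented out):
# normalize a single foreground point for one image and get model-ready tensors.
#
# processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
# inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")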
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    '''simple docstring'''
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
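# Usage sketch (flags inferred from the `args` attributes used above; the
# script filename is hypothetical):
#
#   accelerate launch validation_loss.py --model_ckpt <checkpoint> --dataset_name <dataset>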
| 55
| 0
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self):
        pass
| 702
|
def solution():
    """simple docstring"""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
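# Equivalent one-liner (a sketch): keep only the last ten digits at every step
# by working modulo 10**10 with Python's three-argument pow:
# str(sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10)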
| 55
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    '''simple docstring'''
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )
    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
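# A minimal usage sketch (assumes an active SparkSession and a DataFrame `df`,
# commented out):
#
# dataset = SparkDatasetReader(df, cache_dir="/tmp/cache", streaming=False).read()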
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)
    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)
    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 55
| 0
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
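# Worked example of the word-value computation above: "SKY" -> 19 + 11 + 25 = 55,
# and 55 is the 10th triangular number, so "SKY" counts as a triangle word.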
| 704
|
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
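# Sanity check (sketch): at the mean with unit variance the density equals
# 1 / sqrt(2 * pi) ~= 0.3989.
# assert abs(gaussian(0) - 0.3989422804014327) < 1e-12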
| 55
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
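# Note on the design: outside of TYPE_CHECKING, the module replaces itself with
# a _LazyModule wrapper, so heavy torch/TF/Flax submodules are only imported on
# first attribute access (e.g. `from transformers.models.bert import BertModel`).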
| 55
| 0
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, attention_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoFeatureExtractorTest(unittest.TestCase):
    '''simple docstring'''
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
            processor = Wav2Vec2Processor(feature_extractor, tokenizer)
            # save in new folder
            processor.save_pretrained(tmpdirname)
            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))
            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, Wav2Vec2Processor)
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( string_a : str , string_b : str ) -> int:
    """simple docstring"""
    if len(string_a) != len(string_b):
        raise ValueError('''String lengths must match!''')
    count = 0
    for chara, charb in zip(string_a, string_b):
        if chara != charb:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
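# A quick worked example for the Hamming-distance helper above (inputs are
# illustrative, not from the source): "karolin" and "kathrin" differ at positions
# 2, 3 and 4, so the count is 3.
if __name__ == "__main__":
    assert SCREAMING_SNAKE_CASE("karolin", "kathrin") == 3
    assert SCREAMING_SNAKE_CASE("0000", "1111") == 4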
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
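# A self-contained sketch of how the abstract command base above is typically used:
# a concrete subcommand registers an argparse sub-parser and implements run(). The
# names below (EchoCommand, "echo", _demo) are hypothetical illustrations, not from
# the source; the concrete class does not subclass the base above because the base's
# method names are placeholders in this file.
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo", help="print the given text")
        sub.add_argument("text", type=str)
        sub.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)


def _demo(argv):
    # build a parser, let the subcommand register itself, then dispatch on `func`
    parser = ArgumentParser("cli")
    subparsers = parser.add_subparsers()
    EchoCommand.register_subcommand(subparsers)
    args = parser.parse_args(argv)
    args.func(args).run()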
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
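        # Worked example with the defaults above (illustrative): image_size=[30, 30]
        # and patch_size=2 give (30 // 2) * (30 // 2) = 225 patches, so the expected
        # sequence length is 225 + 1 ([CLS]) + 10 detection tokens = 236.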
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = 42
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
_lowerCAmelCase = 42
_lowerCAmelCase = (16, 32, 96, 256)
_lowerCAmelCase = jnp.floataa
def __snake_case ( self ):
A__ : Optional[Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A__ : int = []
for i in range(len(self.block_out_channels ) - 1 ):
A__ : Optional[Any] = self.block_out_channels[i]
A__ : str = self.block_out_channels[i + 1]
A__ : str = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowerCAmelCase )
A__ : Optional[int] = nn.Conv(
_lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowerCAmelCase )
A__ : str = blocks
A__ : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ ):
A__ : Optional[int] = self.conv_in(_lowerCAmelCase )
A__ : Any = nn.silu(_lowerCAmelCase )
for block in self.blocks:
A__ : Any = block(_lowerCAmelCase )
A__ : Dict = nn.silu(_lowerCAmelCase )
A__ : Union[str, Any] = self.conv_out(_lowerCAmelCase )
return embedding
@flax_register_to_config
class UpperCamelCase__ ( nn.Module, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = 32
_lowerCAmelCase = 4
_lowerCAmelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_lowerCAmelCase = False
_lowerCAmelCase = (320, 640, 1_280, 1_280)
_lowerCAmelCase = 2
_lowerCAmelCase = 8
_lowerCAmelCase = None
_lowerCAmelCase = 1_280
_lowerCAmelCase = 0.0
_lowerCAmelCase = False
_lowerCAmelCase = jnp.floataa
_lowerCAmelCase = True
_lowerCAmelCase = 0
_lowerCAmelCase = "rgb"
_lowerCAmelCase = (16, 32, 96, 256)
def __snake_case ( self , UpperCamelCase__ ):
# init input tensors
A__ : Optional[int] = (1, self.in_channels, self.sample_size, self.sample_size)
A__ : Union[str, Any] = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
A__ : Any = jnp.ones((1,) , dtype=jnp.intaa )
A__ : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A__ : str = (1, 3, self.sample_size * 8, self.sample_size * 8)
A__ : Optional[int] = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
A__ , A__ : Tuple = jax.random.split(_lowerCAmelCase )
A__ : Optional[int] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"]
def __snake_case ( self ):
A__ : Tuple = self.block_out_channels
A__ : List[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A__ : Tuple = self.num_attention_heads or self.attention_head_dim
# input
A__ : Dict = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A__ : int = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A__ : Union[str, Any] = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
A__ : Optional[int] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
A__ : Dict = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A__ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A__ : List[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
A__ : List[Any] = []
A__ : List[Any] = []
A__ : List[Any] = block_out_channels[0]
A__ : str = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
A__ : Tuple = output_channel
A__ : Any = block_out_channels[i]
A__ : int = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A__ : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
A__ : Any = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
for _ in range(self.layers_per_block ):
A__ : Any = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
if not is_final_block:
A__ : List[Any] = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
A__ : Tuple = down_blocks
A__ : Optional[int] = controlnet_down_blocks
# mid
A__ : Union[str, Any] = block_out_channels[-1]
A__ : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=_lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
A__ : int = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1.0 , UpperCamelCase__ = True , UpperCamelCase__ = False , ):
A__ : Any = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
A__ : int = jnp.flip(_lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(_lowerCAmelCase , jnp.ndarray ):
A__ : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
A__ : int = timesteps.astype(dtype=jnp.floataa )
A__ : Tuple = jnp.expand_dims(_lowerCAmelCase , 0 )
A__ : Dict = self.time_proj(_lowerCAmelCase )
A__ : Tuple = self.time_embedding(_lowerCAmelCase )
# 2. pre-process
A__ : List[Any] = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
A__ : Union[str, Any] = self.conv_in(_lowerCAmelCase )
A__ : int = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
A__ : Dict = self.controlnet_cond_embedding(_lowerCAmelCase )
sample += controlnet_cond
# 3. down
A__ : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
A__ , A__ : int = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
else:
A__ , A__ : Any = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
A__ : List[str] = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
        # 5. controlnet blocks
A__ : Any = ()
for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase , self.controlnet_down_blocks ):
A__ : Union[str, Any] = controlnet_block(_lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
A__ : str = controlnet_down_block_res_samples
A__ : int = self.controlnet_mid_block(_lowerCAmelCase )
# 6. scaling
A__ : Optional[int] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_lowerCAmelCase , mid_block_res_sample=_lowerCAmelCase )
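# Call-shape summary for the ControlNet module above, derived from its init-weights
# method (a reading aid, not from the source): `sample` is (batch, in_channels, H, W),
# `timesteps` is a scalar or (batch,) integer array, `encoder_hidden_states` is
# (batch, seq_len, cross_attention_dim), and the conditioning image is
# (batch, 3, 8 * H, 8 * W). The call returns one residual per down-block resolution
# plus a mid-block residual, each multiplied by `conditioning_scale`.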
| 709
|
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
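# Worked example (illustrative): fibonacci(12) = 144 is the first Fibonacci number
# with three digits, so fibonacci_digits_index(3) returns 12; the default
# solution() finds the index of the first 1000-digit term (Project Euler 25).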
| 55
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """simple docstring"""
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """simple docstring"""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 0
|
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(2_0))
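# The closed form above follows from linearity of expectation: each of the 7 colours
# is absent from a 20-ball draw with probability C(60, 20) / C(70, 20), so
#     E[distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)) ~ 6.818741802
# (Project Euler 493); solution(20) prints this value to 9 decimal places.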
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
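# Usage sketch (illustrative): slow tests are opt-in and enabled from the shell, e.g.
#   RUN_SLOW=yes python -m pytest tests/
# The flag parser above accepts any value strtobool understands ("y", "yes", "t",
# "true", "on", "1" and their negatives) and falls back to `default` when the
# variable is unset.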
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
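# The version gate above supports both decorator forms because it returns a partial
# of itself when called without a test case. A usage sketch (the decorator's name is
# a shared placeholder in this file, so `require_torch_min_version` below stands in
# for it):
#   @require_torch_min_version(version="1.12.0")
#   def test_needs_recent_torch(self):
#       ...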
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
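# What the helper above checks (a reading aid, not from the source): every process
# contributes its tensor via gather(), and the function returns True only if each
# gathered copy equals the local copy, i.e. the tensor is identical across
# processes; in a single-process run it is therefore always True.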
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """simple docstring"""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by a comma:\n').strip()
_SCREAMING_SNAKE_CASE : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 712
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[Any] = np.array(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
            A__ : List[str] = self.numbers_to_letter(index1 , index2 )
A__ : Tuple = decoded_message + letter
return decoded_message
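# A runnable sketch of the Bifid cipher implemented by the class above; the
# class and method names are assumptions reconstructed from the references.
import numpy as np

BIFID_SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipherSketch:
    def __init__(self) -> None:
        self.SQUARE = np.array(BIFID_SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        return np.concatenate([index1 + 1, index2 + 1])  # 1-indexed (row, col)

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        message = message.lower().replace(" ", "").replace("j", "i")
        first_step = np.empty((2, len(message)))
        for i, letter in enumerate(message):
            first_step[0, i], first_step[1, i] = self.letter_to_numbers(letter)
        # Row-major flatten: all row indices, then all column indices (the Bifid mixing).
        second_step = first_step.reshape(2 * len(message))
        return "".join(
            self.numbers_to_letter(int(second_step[2 * i]), int(second_step[2 * i + 1]))
            for i in range(len(message))
        )

    def decode(self, message: str) -> str:
        message = message.lower().replace(" ", "")
        first_step = np.empty(2 * len(message))
        for i, letter in enumerate(message):
            first_step[2 * i], first_step[2 * i + 1] = self.letter_to_numbers(letter)
        second_step = first_step.reshape((2, len(message)))
        return "".join(
            self.numbers_to_letter(int(second_step[0, i]), int(second_step[1, i]))
            for i in range(len(message))
        )


_cipher = BifidCipherSketch()
assert _cipher.decode(_cipher.encode("testmessage")) == "testmessage"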
| 55
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = 'time_series_transformer'
_lowerCAmelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "student_t" , UpperCamelCase__ = "nll" , UpperCamelCase__ = 1 , UpperCamelCase__ = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase__ = "mean" , UpperCamelCase__ = 0 , UpperCamelCase__ = 0 , UpperCamelCase__ = 0 , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 32 , UpperCamelCase__ = 32 , UpperCamelCase__ = 2 , UpperCamelCase__ = 2 , UpperCamelCase__ = 2 , UpperCamelCase__ = 2 , UpperCamelCase__ = True , UpperCamelCase__ = "gelu" , UpperCamelCase__ = 64 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 0.1 , UpperCamelCase__ = 100 , UpperCamelCase__ = 0.0_2 , UpperCamelCase__=True , **UpperCamelCase__ , ):
A__ : str = prediction_length
A__ : Union[str, Any] = context_length or prediction_length
A__ : Tuple = distribution_output
A__ : Optional[Any] = loss
A__ : Dict = input_size
A__ : Dict = num_time_features
A__ : Dict = lags_sequence
A__ : int = scaling
A__ : int = num_dynamic_real_features
A__ : Tuple = num_static_real_features
A__ : int = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
A__ : Tuple = cardinality
else:
A__ : Optional[int] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase__ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
A__ : List[str] = embedding_dimension
else:
A__ : Optional[Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A__ : Tuple = num_parallel_samples
# Transformer architecture configuration
A__ : Tuple = input_size * len(UpperCamelCase__ ) + self._number_of_features
A__ : Dict = d_model
A__ : Dict = encoder_attention_heads
A__ : Optional[int] = decoder_attention_heads
A__ : Optional[int] = encoder_ffn_dim
A__ : str = decoder_ffn_dim
A__ : Optional[Any] = encoder_layers
A__ : Optional[Any] = decoder_layers
A__ : Union[str, Any] = dropout
A__ : Optional[int] = attention_dropout
A__ : List[str] = activation_dropout
A__ : Any = encoder_layerdrop
A__ : Optional[int] = decoder_layerdrop
A__ : int = activation_function
A__ : List[Any] = init_std
A__ : Tuple = use_cache
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
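# Hedged usage sketch for the configuration above; the argument values are
# illustrative, and `feature_size` is assumed to be the derived attribute
# assigned in __init__ from input_size * len(lags_sequence) + _number_of_features.
from transformers import TimeSeriesTransformerConfig

ts_config = TimeSeriesTransformerConfig(
    prediction_length=24,   # forecast horizon
    context_length=48,      # history window fed to the encoder
    num_time_features=2,    # e.g. an age feature plus month-of-year
)
print(ts_config.feature_size, ts_config.d_model)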
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
if n_term == "":
return []
A__ : List[str] = []
    for temp in range(int(__UpperCamelCase ) ):
series.append(F"1/{temp + 1}" if series else '''1''' )
return series
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
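# A clean, runnable reading of the harmonic-series helper above (names assumed).
def harmonic_series_sketch(n_term: str) -> list:
    """Return ['1', '1/2', ..., '1/n'] for a numeric string, or [] for ''."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


assert harmonic_series_sketch("5") == ["1", "1/2", "1/3", "1/4", "1/5"]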
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : Dict = model(**__UpperCamelCase )
A__ : Dict = outputs.loss
A__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
A__ : Optional[int] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Union[str, Any] = model(**__UpperCamelCase )
A__ : int = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
A__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Dict = parser.parse_args()
A__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : str = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=UpperCamelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : Dict = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.post_processor(UpperCamelCase__ ).cpu().detach()
| 55
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "gpt_neox"
def __init__( self , UpperCamelCase__=5_0432 , UpperCamelCase__=6144 , UpperCamelCase__=44 , UpperCamelCase__=64 , UpperCamelCase__=2_4576 , UpperCamelCase__="gelu" , UpperCamelCase__=0.2_5 , UpperCamelCase__=1_0000 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__=2048 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-5 , UpperCamelCase__=True , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ):
        super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Tuple = vocab_size
A__ : str = max_position_embeddings
A__ : int = hidden_size
A__ : str = num_hidden_layers
A__ : Union[str, Any] = num_attention_heads
A__ : Dict = intermediate_size
A__ : Dict = hidden_act
A__ : Tuple = rotary_pct
A__ : Optional[Any] = rotary_emb_base
A__ : str = attention_dropout
A__ : List[str] = hidden_dropout
A__ : List[Any] = classifier_dropout
A__ : str = initializer_range
A__ : Dict = layer_norm_eps
A__ : Any = use_cache
A__ : int = tie_word_embeddings
A__ : List[Any] = use_parallel_residual
A__ : Optional[Any] = rope_scaling
self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' )
def __snake_case ( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , UpperCamelCase__ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"got {self.rope_scaling}" )
        A__ : int = self.rope_scaling.get('''type''' , UpperCamelCase__ )
        A__ : Any = self.rope_scaling.get('''factor''' , UpperCamelCase__ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , UpperCamelCase__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 55
| 0
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[str]=False ):
"""simple docstring"""
    A__ : Union[str, Any] = OmegaConf.load(__UpperCamelCase )
    if display:
        print(yaml.dump(OmegaConf.to_container(__UpperCamelCase ) ) )
return config
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : str=None ):
"""simple docstring"""
if conf_path is None:
A__ : List[Any] = "./model_checkpoints/vqgan_only.yaml"
    A__ : Dict = load_config(__UpperCamelCase , display=__UpperCamelCase )
A__ : List[str] = VQModel(**config.model.params )
if ckpt_path is None:
A__ : List[Any] = "./model_checkpoints/vqgan_only.pt"
    A__ : Dict = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
    if ".ckpt" in ckpt_path:
        A__ : List[str] = sd["state_dict"]
    model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
    model.to(__UpperCamelCase )
del sd
return model
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
"""simple docstring"""
    A__ : List[str] = model.encode(__UpperCamelCase )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    A__ : List[Any] = model.decode(__UpperCamelCase )
return xrec
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ):
"""simple docstring"""
    A__ , A__ : str = string.rsplit('''.''' , 1 )
    if reload:
        A__ : Union[str, Any] = importlib.import_module(__UpperCamelCase )
        importlib.reload(__UpperCamelCase )
    return getattr(importlib.import_module(__UpperCamelCase , package=__UpperCamelCase ) , cls )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=True ):
"""simple docstring"""
    A__ : Any = instantiate_from_config(__UpperCamelCase )
    if sd is not None:
        model.load_state_dict(__UpperCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if ckpt:
        A__ : List[str] = torch.load(__UpperCamelCase , map_location='''cpu''' )
A__ : str = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
A__ : int = {"state_dict": None}
A__ : List[str] = None
    A__ : Optional[Any] = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=__UpperCamelCase , eval_mode=__UpperCamelCase )["model"]
return model, global_step
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
| 55
| 0
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
torch.set_grad_enabled(False)
_SCREAMING_SNAKE_CASE : List[Any] = "cuda" if torch.cuda.is_available() else "cpu"
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any]=1_00 , __UpperCamelCase : Dict=" " ) -> List[str]:
"""simple docstring"""
    A__ : Optional[Any] = text.split(__UpperCamelCase )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : dict ) -> dict:
"""simple docstring"""
    A__ , A__ : Any = [], []
for title, text in zip(documents['''title'''] , documents['''text'''] ):
if text is not None:
            for passage in split_text(__UpperCamelCase ):
                titles.append(title if title is not None else '''''' )
                texts.append(__UpperCamelCase )
return {"title": titles, "text": texts}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : dict , __UpperCamelCase : DPRContextEncoder , __UpperCamelCase : DPRContextEncoderTokenizerFast ) -> dict:
"""simple docstring"""
    A__ : str = ctx_tokenizer(
        documents['''title'''] , documents['''text'''] , truncation=__UpperCamelCase , padding='''longest''' , return_tensors='''pt''' )['''input_ids''']
    A__ : Optional[Any] = ctx_encoder(input_ids.to(device=__UpperCamelCase ) , return_dict=__UpperCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : "RagExampleArguments" , __UpperCamelCase : "ProcessingArguments" , __UpperCamelCase : "IndexHnswArguments" , ) -> Optional[int]:
"""simple docstring"""
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
A__ : Union[str, Any] = load_dataset(
'''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    A__ : int = dataset.map(__UpperCamelCase , batched=__UpperCamelCase , num_proc=processing_args.num_proc )
    # And compute the embeddings
    A__ : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__UpperCamelCase )
    A__ : Union[str, Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    A__ : List[str] = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} )  # optional, save as float32 instead of float64 to save space
    A__ : str = dataset.map(
        partial(__UpperCamelCase , ctx_encoder=__UpperCamelCase , ctx_tokenizer=__UpperCamelCase ) , batched=__UpperCamelCase , batch_size=processing_args.batch_size , features=__UpperCamelCase , )
# And finally save your dataset
A__ : Dict = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(__UpperCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
A__ : Optional[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=__UpperCamelCase )
# And save the index
A__ : Tuple = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(__UpperCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = field(
default=str(Path(_UpperCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, )
_lowerCAmelCase = field(
default=_UpperCAmelCase, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, )
_lowerCAmelCase = field(
default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, )
_lowerCAmelCase = field(
default="facebook/dpr-ctx_encoder-multiset-base", metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
}, )
_lowerCAmelCase = field(
default=str(Path(_UpperCAmelCase ).parent / "test_run" / "dummy-kb" ), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, )
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = field(
default=_UpperCAmelCase, metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
}, )
_lowerCAmelCase = field(
default=16, metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
}, )
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = field(
default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, )
_lowerCAmelCase = field(
default=128, metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
}, )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
_SCREAMING_SNAKE_CASE : Tuple = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE : Optional[int] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 718
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
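# A minimal runnable sketch of the O(n log n) inversion count above (names assumed).
def count_inversions_sketch(arr: list) -> tuple:
    """Return (sorted_arr, number_of_inversions) via merge sort."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p, inv_p = count_inversions_sketch(arr[:mid])
    q, inv_q = count_inversions_sketch(arr[mid:])
    merged, cross, i, j = [], 0, 0, 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            cross += len(p) - i  # every remaining element of p inverts with q[j]
            merged.append(q[j])
            j += 1
        else:
            merged.append(p[i])
            i += 1
    merged.extend(p[i:] or q[j:])  # exactly one of the two tails is non-empty
    return merged, inv_p + inv_q + cross


assert count_inversions_sketch([10, 2, 1, 5, 5, 2, 11])[1] == 8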
| 55
| 0
|
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict[Optional[str], Type[Formatter]] = {}
_SCREAMING_SNAKE_CASE : Dict[Optional[str], str] = {}
_SCREAMING_SNAKE_CASE : Dict[Optional[str], Exception] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : str = None , ) -> str:
"""simple docstring"""
A__ : Dict = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
A__ : str = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
A__ : List[str] = format_type
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] = None ) -> Dict:
"""simple docstring"""
A__ : List[str] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
A__ : str = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
_SCREAMING_SNAKE_CASE : Dict = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
_SCREAMING_SNAKE_CASE : List[Any] = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[str]:
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ) -> Formatter:
"""simple docstring"""
    A__ : List[Any] = get_format_type_from_alias(__UpperCamelCase )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**__UpperCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 719
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
        bright_img.save('image_data/lena_brightness.png', format='png')
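# A sketch of the brightness helper with explicit clamping; the version above
# leans on PIL's lookup-table handling of out-of-range values (names assumed).
from PIL import Image


def change_brightness_sketch(img: Image.Image, level: float) -> Image.Image:
    """Shift every channel value by `level`, clamped to the valid 0-255 range."""
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: int(max(0, min(255, c + level))))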
| 55
| 0
|
from collections import Counter
from timeit import timeit
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict = "" , ) -> bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] = "" ) -> bool:
"""simple docstring"""
if len(__UpperCamelCase ) == 0:
return True
A__ : Optional[int] = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
A__ : Tuple = {}
for character in lower_case_input_str:
A__ : List[str] = character_freq_dict.get(__UpperCamelCase , 0 ) + 1
A__ : Optional[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any = "" ) -> None:
"""simple docstring"""
print('''\nFor string = ''' , __UpperCamelCase , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(__UpperCamelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(__UpperCamelCase ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
_SCREAMING_SNAKE_CASE : Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 0
|
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_SCREAMING_SNAKE_CASE : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_SCREAMING_SNAKE_CASE : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_SCREAMING_SNAKE_CASE : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Any ) -> Optional[int]:
"""simple docstring"""
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str="binary" ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] = simple_accuracy(__UpperCamelCase , __UpperCamelCase )
A__ : Dict = float(fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
A__ : List[Any] = {}
for id_pred, label in zip(__UpperCamelCase , __UpperCamelCase ):
A__ : Optional[int] = F"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
A__ : Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
A__ : str = [(pred, label)]
    A__ , A__ : List[str] = [], []
for question, preds_labels in question_map.items():
        A__ , A__ : Optional[Any] = zip(*__UpperCamelCase )
A__ : int = fa_score(y_true=__UpperCamelCase , y_pred=__UpperCamelCase , average='''macro''' )
fas.append(__UpperCamelCase )
A__ : Dict = int(sum(pred == label for pred, label in preds_labels ) == len(__UpperCamelCase ) )
ems.append(__UpperCamelCase )
A__ : Optional[int] = float(sum(__UpperCamelCase ) / len(__UpperCamelCase ) )
A__ : Any = sum(__UpperCamelCase ) / len(__UpperCamelCase )
A__ : int = float(fa_score(y_true=__UpperCamelCase , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __snake_case ( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg='''macro''' )
elif self.config_name == "record":
A__ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
A__ : Dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(UpperCamelCase__ , UpperCamelCase__ )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(UpperCamelCase__ , UpperCamelCase__ )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
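        # Transpose the references from one list per prediction into the
        # one-stream-per-reference layout that sacrebleu's corpus_score expects.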
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**9 ) -> Union[str, Any]:
"""simple docstring"""
A__ : Union[str, Any] = 1
A__ : Optional[int] = 2
A__ : Union[str, Any] = 0
A__ : Any = 0
A__ : List[Any] = 0
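    # Iterate a linear recurrence that generates successive candidate
    # perimeters, accumulating them while they stay within the bound.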
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A__ : Union[str, Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
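# Note (added for context): this tuple is the 33-symbol alphabet used by the
# ESM-2 checkpoints: special tokens, the 20 standard amino acids, ambiguity
# codes, and the gap and mask symbols.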
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict = 10 , __UpperCamelCase : Optional[int] = 22 ) -> int:
"""simple docstring"""
    A__ : Dict = range(1 , max_base )
    A__ : Union[str, Any] = range(1 , max_power )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
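# With the default bounds this counts the 49 n-digit nth powers
# (Project Euler problem 63).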
if __name__ == "__main__":
print(f"""{solution(1_0, 2_2) = }""")
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
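    # Buffer-sizing heuristic: at roughly `chars_per_token` characters per token,
    # `input_characters` of raw text should tokenize into about `num_of_sequences`
    # chunks of `seq_length` tokens each.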
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
            A__ : str = self.tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
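# Perplexity is the exponential of the mean cross-entropy loss; the try/except
# above guards against overflow when the loss is very large.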
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE_, R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ", )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
if self.framework == "tf":
A__ : Dict = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A__ : str = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase__ )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = self.get_masked_index(UpperCamelCase__ )
A__ : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def __snake_case ( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ):
if return_tensors is None:
A__ : Dict = self.framework
A__ : List[Any] = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.ensure_exactly_one_mask_token(UpperCamelCase__ )
return model_inputs
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = self.model(**UpperCamelCase__ )
A__ : Optional[int] = model_inputs['''input_ids''']
return model_outputs
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=5 , UpperCamelCase__=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
A__ : Dict = target_ids.shape[0]
A__ : int = model_outputs['''input_ids'''][0]
A__ : List[str] = model_outputs['''logits''']
if self.framework == "tf":
A__ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A__ : Tuple = outputs.numpy()
A__ : int = outputs[0, masked_index, :]
A__ : str = stable_softmax(UpperCamelCase__ , axis=-1 )
if target_ids is not None:
A__ : str = tf.gather_nd(tf.squeeze(UpperCamelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
A__ : Union[str, Any] = tf.expand_dims(UpperCamelCase__ , 0 )
A__ : Union[str, Any] = tf.math.top_k(UpperCamelCase__ , k=UpperCamelCase__ )
A__ , A__ : Optional[int] = topk.values.numpy(), topk.indices.numpy()
else:
A__ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A__ : Optional[Any] = outputs[0, masked_index, :]
A__ : Union[str, Any] = logits.softmax(dim=-1 )
if target_ids is not None:
A__ : Dict = probs[..., target_ids]
A__ , A__ : Union[str, Any] = probs.topk(UpperCamelCase__ )
A__ : Optional[Any] = []
A__ : List[str] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A__ : str = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A__ : Any = input_ids.numpy().copy()
if target_ids is not None:
A__ : int = target_ids[p].tolist()
A__ : Optional[int] = p
# Filter padding out:
A__ : Optional[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A__ : Optional[Any] = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
A__ : Any = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(UpperCamelCase__ )
result.append(UpperCamelCase__ )
if single_mask:
return result[0]
return result
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = [targets]
try:
A__ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
A__ : Optional[int] = {}
A__ : Tuple = []
for target in targets:
A__ : Optional[int] = vocab.get(UpperCamelCase__ , UpperCamelCase__ )
if id_ is None:
A__ : Dict = self.tokenizer(
UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , max_length=1 , truncation=UpperCamelCase__ , )['''input_ids''']
if len(UpperCamelCase__ ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
A__ : Optional[int] = input_ids[0]
                # XXX: When users hit this code path, tokenization becomes pretty
                # slow, so the warning below lets them fix their input to get
                # faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
A__ : str = list(set(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
A__ : Dict = np.array(UpperCamelCase__ )
return target_ids
def __snake_case ( self , UpperCamelCase__=None , UpperCamelCase__=None ):
A__ : Optional[Any] = {}
if targets is not None:
A__ : str = self.get_target_ids(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[Any] = target_ids
if top_k is not None:
A__ : Tuple = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ):
A__ : str = super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) == 1:
return outputs[0]
return outputs
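# A minimal usage sketch (added for illustration, not part of the original file),
# assuming the standard `transformers.pipeline` factory and an arbitrary
# checkpoint choice:
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=3)
#
# Each returned dict carries `score`, `token`, `token_str` and `sequence` keys,
# matching the postprocess method above.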
| 702
|
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] = 0
for i in range(1 , 10_01 ):
total += i**i
    return str(total )[-10:]
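# The well-known last-ten-digits answer for this sum (Project Euler problem 48)
# is '9110846700'.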
if __name__ == "__main__":
print(solution())
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
_SCREAMING_SNAKE_CASE : Tuple = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 0
|
import operator as op
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> int:
"""simple docstring"""
A__ : Union[str, Any] = []
A__ : Optional[Any] = lambda __UpperCamelCase , __UpperCamelCase : int(x / y ) # noqa: E731 integer division operation
A__ : Optional[int] = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit(): # if x is a digit
            stack.append(x ) # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
        else:
            A__ : Dict = stack.pop() # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            A__ : int = stack.pop() # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
return int(stack[0] )
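# A small worked example (added for illustration): for the postfix expression
# "5 6 9 * +" the evaluator pops 9 and 6 and pushes 54, then pops 54 and 5 and
# pushes 59, so the result is 59.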
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 704
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
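# Sanity check (added for illustration): for the standard normal density
# (mu = 0, sigma = 1) the value at the mean is 1 / sqrt(2 * pi), roughly 0.3989.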
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=64 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=1 , ):
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = q_groups
A__ = k_groups
A__ = v_groups
A__ = post_attention_groups
A__ = intermediate_groups
A__ = output_groups
def __snake_case ( self ):
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self ):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = SqueezeBertModel(config=_a )
model.to(_a )
model.eval()
A__ = model(_a , _a )
A__ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = SqueezeBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
A__ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = SqueezeBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
A__ = model(
_a , attention_mask=_a , start_positions=_a , end_positions=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = self.num_labels
A__ = SqueezeBertForSequenceClassification(_a )
model.to(_a )
model.eval()
A__ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = self.num_labels
A__ = SqueezeBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
A__ = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = self.num_choices
A__ = SqueezeBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = model(
_a , attention_mask=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self ):
A__ = self.prepare_config_and_inputs()
(A__) = config_and_inputs
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowerCAmelCase = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = False
def __snake_case ( self ):
A__ = SqueezeBertModelTester(self )
A__ = ConfigTester(self , config_class=_a , dim=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_a )
def __snake_case ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_a )
def __snake_case ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_a )
def __snake_case ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_a )
def __snake_case ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_a )
def __snake_case ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_a )
@slow
def __snake_case ( self ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SqueezeBertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ):
A__ = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
A__ = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
A__ = model(_a )[0]
A__ = torch.Size((1, 3) )
self.assertEqual(output.shape , _a )
A__ = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(_a , _a , atol=1e-4 ) )
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
| 0
|
import inspect
import unittest
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __snake_case ( self ):
import diffusers
from diffusers.dependency_versions_table import deps
A__ : List[str] = inspect.getmembers(a_ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
A__ : Optional[Any] = """k-diffusion"""
elif backend == "invisible_watermark":
A__ : Tuple = """invisible-watermark"""
assert backend in deps, F"{backend} is not in the deps table!"
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : str = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
| 55
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_SCREAMING_SNAKE_CASE : Dict = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_SCREAMING_SNAKE_CASE : Dict = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_SCREAMING_SNAKE_CASE : Optional[int] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_SCREAMING_SNAKE_CASE : List[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_SCREAMING_SNAKE_CASE : Any = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores of the code candidates in `predictions`."""
        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {F"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array.

    Uses the unbiased estimator from the Codex paper:
    pass@k = 1 - C(n - c, k) / C(n, k), evaluated in a numerically stable way.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
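# --- Worked example (illustrative addition, not part of the original metric) ---
# A quick sanity check of the unbiased pass@k estimator above, assuming
# n = 5 generated samples per task of which c = 2 pass: pass@1 equals the
# plain pass rate c / n, and pass@k grows toward 1 as k approaches n.
if __name__ == "__main__":
    print(estimate_pass_at_k(5, [2], 1))  # [0.4]  == 2/5
    print(estimate_pass_at_k(5, [2], 3))  # 1 - C(3,3)/C(5,3) = 1 - 1/10 = [0.9]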
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device)
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long)
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float)
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len])
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results['scores']), 5)
        self.assertTrue(torch.allclose(results['scores'], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results['labels'].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results['boxes'][0, :], expected_slice_boxes))
| 55
| 0
|
import fire
from utils import calculate_rouge, save_json
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple=None , **__UpperCamelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ : List[str] = [x.strip() for x in open(_lowercase ).readlines()]
A__ : Any = [x.strip() for x in open(_lowercase ).readlines()][: len(_lowercase )]
A__ : Dict = calculate_rouge(_lowercase , _lowercase , **_lowercase )
if save_path is not None:
save_json(_lowercase , _lowercase , indent=_lowercase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 709
|
def fibonacci(n: int) -> int:
    """Computes the n-th Fibonacci number iteratively."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Returns the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Project Euler 25: index of the first Fibonacci term with `n` digits."""
    return fibonacci_digits_index(n)
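# --- Quick check (illustrative addition, not part of the original solution) ---
# fibonacci(12) = 144 is the first three-digit Fibonacci number under this
# file's indexing, so fibonacci_digits_index(3) should return 12:
#
#   >>> fibonacci_digits_index(3)
#   12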
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='moussaKam/mbarthez',
            revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6',
            sequences=sequences,
        )
| 710
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """Jumps ahead in the digit-sum sequence using cached jumps.

    Returns (diff, terms_jumped), updating the digit list `a_i` in place.
    """
    # ds_b - digitsum(b); c - value of the lowest k digits (a_i = b * 10^k + c)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Computes terms of the sequence one at a time, updating `a_i` in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds `addend` to the digit list `digits`, starting at position `k`."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns the n-th term of the sequence a(i) = a(i-1) + digitsum(a(i-1))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 0
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Applies the ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
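# --- Usage sketch (illustrative addition; values are approximate) ---
# With alpha = 0.3, positive inputs pass through unchanged while negative
# inputs are smoothly squashed toward -alpha:
#
#   exponential_linear_unit(np.array([2.0, -2.0]), alpha=0.3)
#   # -> array([ 2.0, ~-0.2594 ])   since 0.3 * (exp(-2) - 1) ≈ -0.2594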
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip('Test was skipped')(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=1 is set."""
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)


def require_torch_min_version(test_case=None, version=None):
    """
    Decorator marking that a test requires a particular torch version (at least). Skipped when the installed torch
    version is less than the required one.
    """
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), F"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        'test requires at least one tracker to be available and for `comet_ml` to not be installed',
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    """
    A TestCase class that keeps a single temporary directory open for the duration of the class, wipes its contents
    at the start of each test, and removes it when the class is torn down.
    """

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Creates a temporary directory shared by all tests of the class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Removes `cls.tmpdir` after the test suite."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Destroys all contents of `self.tmpdir`, but not the directory itself."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """A TestCase class that resets the accelerator state between tests to avoid singleton leakage."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """A TestCase class designed to dynamically add mocks that should be used in every test."""

    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Checks whether `tensor` is identical across all processes."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env)

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and optionally returns its stdout. Properly captures and re-raises
    any error that occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 55
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument(
        '--max_length', type=int, default=5, help='The maximum total input sequence length after tokenization.')
    parser.add_argument(
        '--num_beams',
        type=int,
        default=None,
        help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ),
    )
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=True,
    )
    parser.add_argument(
        '--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--device', type=str, default='cpu', help='Device where the model will be run')
    parser.add_argument('--output_file_path', type=str, default=None, help='Where to store the final ONNX file.')

    args = parser.parse_args()
    return args
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int]="cpu" ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict = model_dict[model_name].from_pretrained(_lowercase ).to(_lowercase )
A__ : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowercase )
if model_name in ["facebook/bart-base"]:
A__ : Dict = 0
A__ : List[str] = None
A__ : int = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt').to(model.device)

        summary_ids = model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'],
            output_names=['output_ids'],
            dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            },
            example_outputs=summary_ids,
        )

        logger.info('Model exported to {}'.format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
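# --- Invocation sketch (illustrative addition; only facebook/bart-base is
# wired into the script's model/tokenizer dictionaries, and the script
# filename shown here is hypothetical) ---
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path bart_beam_search.onnx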
| 712
|
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class PolybiusCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter):
        """Returns the pair of numbers that represents the given letter in the polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        """Returns the letter corresponding to the position [index1, index2] in the polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        """Returns the encoded version of the message according to the polybius cipher."""
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message):
        """Returns the decoded version of the message according to the polybius cipher."""
        message = message.lower()
        message = message.replace(' ', '')

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
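# --- Usage sketch (illustrative addition, not part of the original file) ---
# A round trip through the cipher; note that spaces are stripped and 'j' is
# folded into 'i' by design, so decode returns the normalized message.
if __name__ == "__main__":
    cipher = PolybiusCipher()
    secret = cipher.encode("test message")
    print(secret)                 # an enciphered string of the same length
    print(cipher.decode(secret))  # "testmessage"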
| 55
| 0
|
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Returns the squared second norm of `vector`: sum(x * x for x in vector)."""
    return np.dot(vector, vector)
class SVC:
    """Support Vector Classifier with a linear or RBF kernel, trained via the Wolfe dual."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = F"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: K(v1, v2) = exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize, so that scipy can minimize it."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Gets the expected class of an observation: 1 or -1."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
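# --- Usage sketch (illustrative addition; the toy data and expected outputs
# are assumptions, not results from the original file) ---
# Fitting the classifier on a tiny linearly separable set; labels must be
# +1 / -1 for the dual formulation above to make sense.
#
#   xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#         np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#   ys = np.asarray([1, 1, -1, -1])
#   svc = SVC(kernel="linear")
#   svc.fit(xs, ys)
#   svc.predict(np.asarray([0.0, 1.5]))   # expected: 1
#   svc.predict(np.asarray([1.0, 1.5]))   # expected: -1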
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrapes the current share price for `symbol` from Yahoo Finance."""
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find('div', class_=class_).find('span').text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`): An `Accelerator` object
        batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : Dict = model(**__UpperCamelCase )
A__ : Dict = outputs.loss
A__ : List[str] = loss / gradient_accumulation_steps
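# Scaling the loss keeps the accumulated gradient equivalent to that of a single
# batch of size batch_size * gradient_accumulation_steps.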
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
A__ : Optional[int] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Union[str, Any] = model(**__UpperCamelCase )
A__ : int = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
A__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
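# (Sketch of the alternative: `gather_for_metrics` performs the same last-batch
# truncation automatically, so the manual bookkeeping above disappears.)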
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16, bf16 (bfloat16) and fp8. Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Dict = parser.parse_args()
A__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
from ....utils import logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=2048 ):
A__ : Optional[int] = config.__dict__
A__ : List[Any] = modal_hidden_size
if num_labels:
A__ : Optional[Any] = num_labels
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=UpperCamelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : Dict = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
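# Index 7305 selects a fixed example speaker from the CMU ARCTIC x-vector set;
# any x-vector tensor of shape (1, 512) could be supplied instead.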
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.post_processor(UpperCamelCase__ ).cpu().detach()
| 55
| 0
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCamelCase__ ( __snake_case, __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = StableUnCLIPPipeline
_lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
_lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : List[Any] = 32
A__ : Union[str, Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
A__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
A__ : Any = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=A_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A__ : Optional[int] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=A_ , num_layers=1 , )
torch.manual_seed(0 )
A__ : Optional[Any] = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=A_ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
A__ : Tuple = StableUnCLIPImageNormalizer(embedding_dim=A_ )
A__ : Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
A__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
A__ : Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
A__ : Optional[Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A_ , layers_per_block=1 , upcast_attention=A_ , use_linear_projection=A_ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=A_ , steps_offset=1 , )
torch.manual_seed(0 )
A__ : Any = AutoencoderKL()
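# Assemble the two-stage pipeline: the prior components map text to a CLIP image
# embedding, which the noising + denoising components below turn into pixels.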
A__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
if str(A_ ).startswith('''mps''' ):
A__ : List[str] = torch.manual_seed(A_ )
else:
A__ : str = torch.Generator(device=A_ ).manual_seed(A_ )
A__ : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __snake_case ( self ):
A__ : Any = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=A_ )
def __snake_case ( self ):
A__ : List[str] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=A_ )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
A__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
A__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A__ : Tuple = pipe('''anime turtle''' , generator=A_ , output_type='''np''' )
A__ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
def __snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
A__ : List[Any] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
A__ : List[Any] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
A__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
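# i.e. keep only the most recent tokens so the conversation still fits the
# model's maximum context length.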
return input_ids
| 55
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_SCREAMING_SNAKE_CASE : Tuple = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself          This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit            This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities       These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_SCREAMING_SNAKE_CASE : List[Any] = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Dict=False , __UpperCamelCase : int=True , __UpperCamelCase : str=False , __UpperCamelCase : Tuple="dummy_doc" ):
"""simple docstring"""
A__ : List[str] = {doc: key_lines}
A__ : List[Any] = {doc: sys_lines}
A__ : List[str] = {}
A__ : List[Any] = 0
A__ : Union[str, Any] = 0
A__ : Union[str, Any] = 0
A__ : Optional[Any] = 0
A__ : List[str] = 0
A__ : List[str] = 0
A__ , A__ : Union[str, Any] = reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
A__ : Any = reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
A__ , A__ : str = reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
A__ : Optional[Any] = reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
A__ , A__ : Tuple = reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
A__ , A__ : Optional[int] = reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
A__ : int = reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
A__ : Tuple = reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
A__ : Any = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : int ):
"""simple docstring"""
A__ : Optional[Any] = get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ : Dict = {}
A__ : str = 0
A__ : int = 0
for name, metric in metrics:
A__ , A__ , A__ : Optional[Any] = evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
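# The CoNLL score is the average F1 of exactly MUC, B-cubed and CEAFe, hence the
# running sum over these three sub-metrics.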
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 1_00:.2f}" , F" Precision: {precision * 1_00:.2f}" , F" F1: {fa * 1_00:.2f}" , )
if conll_subparts_num == 3:
A__ : Optional[Any] = (conll / 3) * 1_00
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A__ : Tuple = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
A__ : List[Any] = line.split()[5]
if not parse_col == "-":
A__ : List[Any] = True
break
else:
break
return has_gold_parse
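# A key file carries gold parse annotation when any non-comment line has a parse
# bit (column 6) other than "-".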
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False ):
A__ : Union[str, Any] = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
A__ : Optional[int] = util.check_gold_parse_annotation(UpperCamelCase__ )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
A__ : List[Any] = evaluate(
key_lines=UpperCamelCase__ , sys_lines=UpperCamelCase__ , metrics=UpperCamelCase__ , NP_only=UpperCamelCase__ , remove_nested=UpperCamelCase__ , keep_singletons=UpperCamelCase__ , min_span=UpperCamelCase__ , )
return score
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : str = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Any = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
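# _LazyModule defers the heavy framework imports declared above until one of the
# exported attributes is actually accessed.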
| 718
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
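# The brute force above is O(n^2); the recursive merge-count below achieves
# O(n log n). Worked example: [3, 1, 2] has 2 inversions, (3, 1) and (3, 2).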
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
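# r is the merged, sorted sequence and num_inversion counts the pairs split
# across p and q, completing the divide-and-conquer recurrence.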
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case ( self ):
A__ : int = 1
A__ : List[Any] = 3
A__ : Optional[Any] = (32, 32)
A__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
return image
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Tuple = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase_ )
@property
def __snake_case ( self ):
def extract(*UpperCamelCase__ , **UpperCamelCase__ ):
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : Any = torch.ones([0] )
def __snake_case ( self , UpperCamelCase__ ):
self.pixel_values.to(lowerCAmelCase_ )
return self
return Out()
return extract
def __snake_case ( self ):
A__ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A__ : List[str] = self.dummy_cond_unet
A__ : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
A__ : Optional[int] = self.dummy_vae
A__ : List[str] = self.dummy_text_encoder
A__ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
A__ : List[Any] = 77
A__ : str = self.dummy_image.to(lowerCAmelCase_ )
A__ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ : Tuple = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
A__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase_ )
A__ : Optional[int] = alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int = '''A painting of a squirrel eating a burger'''
A__ : Optional[Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
A__ : List[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowerCAmelCase_ , )
A__ : Union[str, Any] = output.images
A__ : Any = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 )
A__ : Dict = alt_pipe(
[prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
A__ : Union[str, Any] = image[0, -3:, -3:, -1]
A__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ : Union[str, Any] = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __snake_case ( self ):
A__ : Optional[Any] = self.dummy_cond_unet
A__ : Any = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
A__ : Optional[int] = self.dummy_vae
A__ : str = self.dummy_text_encoder
A__ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
A__ : int = 77
A__ : int = self.dummy_image.to(lowerCAmelCase_ )
# put models in fp16
A__ : Any = unet.half()
A__ : Optional[Any] = vae.half()
A__ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
A__ : str = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , )
A__ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase_ )
A__ : Dict = alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : str = '''A painting of a squirrel eating a burger'''
A__ : Tuple = torch.manual_seed(0 )
A__ : Union[str, Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type='''np''' , image=lowerCAmelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __snake_case ( self ):
A__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ : Optional[int] = init_image.resize((760, 504) )
A__ : str = '''BAAI/AltDiffusion'''
A__ : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
A__ : List[str] = '''A fantasy landscape, trending on artstation'''
A__ : Any = torch.manual_seed(0 )
A__ : List[str] = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase_ , output_type='''np''' , )
A__ : int = output.images[0]
A__ : List[str] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
A__ : Any = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
A__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A__ : Any = init_image.resize((768, 512) )
A__ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
A__ : Optional[Any] = '''BAAI/AltDiffusion'''
A__ : str = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
A__ : str = '''A fantasy landscape, trending on artstation'''
A__ : str = torch.manual_seed(0 )
A__ : List[Any] = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase_ , output_type='''np''' , )
A__ : int = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 719
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (__UpperCamelCase - 1_28)
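# The expression simplifies to adding `level` to each pixel value; Image.point
# evaluates it over the 0-255 lookup range and clamps results for 8-bit images.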
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 55
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( __lowercase, __lowercase ):
'''simple docstring'''
_lowerCAmelCase = '''nat'''
_lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , UpperCamelCase__=4 , UpperCamelCase__=3 , UpperCamelCase__=64 , UpperCamelCase__=[3, 4, 6, 5] , UpperCamelCase__=[2, 4, 8, 16] , UpperCamelCase__=7 , UpperCamelCase__=3.0 , UpperCamelCase__=True , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.1 , UpperCamelCase__="gelu" , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(**__a )
A__ : Any = patch_size
A__ : Any = num_channels
A__ : int = embed_dim
A__ : Tuple = depths
A__ : Optional[int] = len(__a )
A__ : Union[str, Any] = num_heads
A__ : Any = kernel_size
A__ : Dict = mlp_ratio
A__ : Tuple = qkv_bias
A__ : Optional[int] = hidden_dropout_prob
A__ : Tuple = attention_probs_dropout_prob
A__ : Optional[int] = drop_path_rate
A__ : Union[str, Any] = hidden_act
A__ : int = layer_norm_eps
A__ : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ : int = int(embed_dim * 2 ** (len(__a ) - 1) )
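# e.g. the default embed_dim=64 with 4 stages gives hidden_size = 64 * 2**3 = 512.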
A__ : Dict = layer_scale_init_value
A__ : int = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(__a ) + 1 )]
A__ : Optional[int] = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 0
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
A__ : Optional[int] = os.path.join(args.tf_model_dir , '''parameters.json''' )
A__ : List[Any] = json.loads(open(_UpperCamelCase ).read() )
if not params:
raise ValueError(
F"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith('''.pt''' ):
A__ : List[Any] = args.output + '''.pt'''
A__ : str = OrderedDict()
with tf.device('''/CPU:0''' ):
A__ : Optional[Any] = tf.train.load_checkpoint(args.tf_model_dir )
A__ : Tuple = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
A__ : Dict = reader.get_tensor(_UpperCamelCase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
A__ : List[str] = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
A__ : int = 8
A__ : Any = '''model.sqout.%d.weight''' % (player * 2)  # fed into nn.Sequential with Tanh, so 2 at a time
A__ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ : Optional[Any] = torch.tensor(_UpperCamelCase )
elif key_name.startswith('''model/moe''' ):
A__ : Tuple = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
A__ : Optional[int] = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
A__ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ : List[Any] = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/softmlp/kernel''' ):
A__ : List[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
A__ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ : Any = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
A__ : Tuple = key_name[-9:-7]
for i in range(16 ):
A__ : List[str] = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
A__ : Union[str, Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A__ : List[Any] = torch.tensor(_UpperCamelCase )
elif key_name.startswith('''model/mlp''' ):
A__ : Dict = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
A__ : Dict = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
A__ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ : List[str] = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/p1/bias''' ):
A__ : List[Any] = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
A__ : List[Any] = vnp.copy() # same because it is one dimensional
A__ : str = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/p2/kernel''' ):
A__ : int = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
A__ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ : str = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/p2/bias''' ):
A__ : List[str] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
A__ : List[Any] = vnp.copy() # same because it is one dimensional
A__ : int = torch.tensor(_UpperCamelCase )
elif key_name.startswith('''model/ln''' ):
A__ : Optional[Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A__ : str = '''model.blocks.%d.feed_forward.norm.bias''' % player
A__ : Any = vnp.copy() # same because it is one dimensional
A__ : Any = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/g''' ):
A__ : int = '''model.blocks.%d.feed_forward.norm.weight''' % player
A__ : str = vnp.copy() # same because it is one dimensional
A__ : List[Any] = torch.tensor(_UpperCamelCase )
elif key_name.startswith('''model/att''' ):
A__ : Optional[Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
A__ : int = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A__ : Dict = state[:, 0, :, :]
A__ : int = state[:, 1, :, :]
A__ : Optional[int] = state[:, 2, :, :]
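# The checkpoint stores q, k and v stacked along axis 1 of one qkv tensor; each
# slice is reshaped and transposed into a separate projection matrix below.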
A__ : Union[str, Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ : int = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ : int = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ : int = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
A__ : Dict = torch.tensor(_UpperCamelCase )
A__ : Optional[int] = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
A__ : Dict = torch.tensor(_UpperCamelCase )
A__ : str = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
A__ : Optional[Any] = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/o/kernel''' ):
A__ : Optional[Any] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
A__ : int = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ : List[str] = torch.tensor(_UpperCamelCase )
elif key_name.startswith('''model/an''' ):
A__ : List[str] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A__ : Dict = '''model.blocks.%d.self_attn.norm.bias''' % player
A__ : List[Any] = vnp.copy() # same because it is one dimensional
A__ : int = torch.tensor(_UpperCamelCase )
elif key_name.endswith('''/g''' ):
A__ : int = '''model.blocks.%d.self_attn.norm.weight''' % player
A__ : Optional[Any] = vnp.copy() # same because it is one dimensional
A__ : int = torch.tensor(_UpperCamelCase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
A__ : str = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
A__ : Optional[Any] = '''model.%s.weight''' % nlayer
A__ : List[str] = vnp.copy() # same in embedded
A__ : Optional[int] = torch.tensor(_UpperCamelCase )
if key_name.startswith('''model/wte''' ):
A__ : List[str] = '''lm_head.weight'''
A__ : Optional[int] = vnp.copy() # same in embedded
A__ : List[Any] = torch.tensor(_UpperCamelCase )
elif key_name.startswith('''model/wob''' ):
A__ : Optional[int] = '''final_logits_bias'''
A__ : List[Any] = vnp.copy() # same in embedded
A__ : List[str] = state.reshape((1, -1) )
A__ : str = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense/kernel":
A__ : int = '''model.last_project.weight'''
A__ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ : List[Any] = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense_1/bias":
A__ : Any = '''model.last_project.bias'''
A__ : Any = vnp.copy() # same because it is one dimensional
A__ : Optional[int] = torch.tensor(_UpperCamelCase )
torch.save(_UpperCamelCase , args.output )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
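# --- Illustrative sketch (not part of the converter above): how the qkv-kernel
# branch maps a Mesh-TensorFlow weight of shape (hidden, 3, heads, head_dim)
# onto PyTorch (out_features, in_features) projection matrices. All shapes and
# values below are made-up examples.
import numpy as np
import torch

hidden, heads, head_dim = 8, 2, 4
qkv = np.random.randn(hidden, 3, heads, head_dim).astype(np.float32)
state_q = qkv[:, 0, :, :]  # (hidden, heads, head_dim), as in the branch above
q_proj_weight = (
    state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
    .transpose([1, 0])  # PyTorch Linear stores weights as (out, in)
    .copy()
)
assert torch.tensor(q_proj_weight).shape == (heads * head_dim, hidden)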
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : str = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
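# --- Illustrative sketch, assuming the classes above correspond to
# `EsmConfig` / `EsmFoldConfig` in `transformers` (the import path and keyword
# names are assumptions from that library): a plain dict passed as
# `esmfold_config` is coerced into an EsmFoldConfig, and a missing `vocab_list`
# falls back to the 33-token default vocabulary returned above.
from transformers import EsmConfig

config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={})
print(type(config.esmfold_config).__name__)  # EsmFoldConfig
print(len(config.vocab_list))  # 33, the default vocabulary above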
| 55
| 0
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to a string in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
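# --- Example conversions with the function above (values checked by hand):
print(decimal_to_any(255, 16))  # FF
print(decimal_to_any(5, 2))  # 101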
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with blocks of
    minimum length three, separated by at least one empty square (Project Euler 114)."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 702
|
def solution() -> str:
    """Find the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
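# --- Cross-check with the worked example from Project Euler 48: the series up
# to 10**10 sums to 10405071317, so the same loop over 1..10 must reproduce it.
print(sum(i**i for i in range(1, 11)))  # 10405071317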
| 55
| 0
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_SCREAMING_SNAKE_CASE : List[str] = '\\n Text data.\n Second line of data.'
_SCREAMING_SNAKE_CASE : Dict = 'file'
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ):
"""simple docstring"""
A__ : Dict = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
A__ : Dict = bytes(lowerCAmelCase_ , '''utf-8''' )
with zstd.open(lowerCAmelCase_ , '''wb''' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir , lowerCAmelCase_ ) , '''w''' ) as f:
f.write(lowerCAmelCase_ )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ):
"""simple docstring"""
A__ : List[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
A__ : Optional[Any] = input_paths[compression_format]
A__ : str = tmp_path / '''cache'''
A__ : int = DownloadConfig(cache_dir=lowerCAmelCase_ , extract_compressed_file=lowerCAmelCase_ )
A__ : Optional[Any] = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ )
with open(lowerCAmelCase_ ) as f:
A__ : List[str] = f.read()
with open(lowerCAmelCase_ ) as f:
A__ : str = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Any ):
"""simple docstring"""
A__ : Union[str, Any] = '''custom_cache'''
A__ : int = '''custom_extracted_dir'''
A__ : Dict = tmp_path / '''custom_extracted_path'''
if default_extracted:
A__ : Dict = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , lowerCAmelCase_ )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowerCAmelCase_ ) )
A__ : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A__ : Optional[int] = xz_file
A__ : Union[str, Any] = (
DownloadConfig(extract_compressed_file=lowerCAmelCase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase_ )
)
A__ : str = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ )
assert Path(lowerCAmelCase_ ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : Dict = str(Path(lowerCAmelCase_ ).resolve() )
assert cached_path(lowerCAmelCase_ ) == text_file
# relative path
A__ : Optional[int] = str(Path(lowerCAmelCase_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCAmelCase_ ) == text_file
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ):
"""simple docstring"""
A__ : str = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(lowerCAmelCase_ ):
cached_path(lowerCAmelCase_ )
# relative path
A__ : Optional[Any] = '''./__missing_file__.txt'''
with pytest.raises(lowerCAmelCase_ ):
cached_path(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ):
"""simple docstring"""
A__ : str = get_from_cache(F"tmp://{tmpfs_file}" )
with open(lowerCAmelCase_ ) as f:
A__ : Dict = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ):
"""simple docstring"""
A__ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowerCAmelCase_ ):
http_get('''https://huggingface.co''' , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowerCAmelCase_ ):
ftp_get('''ftp://huggingface.co''' , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(lowerCAmelCase_ ):
fsspec_get('''s3://huggingface.co''' , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
fsspec_head('''s3://huggingface.co''' )
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 0
|
from manim import *
class UpperCamelCase__ ( _UpperCAmelCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
A__ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
A__ : Optional[Any] = [mem.copy() for i in range(6 )]
A__ : str = [mem.copy() for i in range(6 )]
A__ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
A__ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
A__ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 )
A__ : List[Any] = Text('''CPU''' , font_size=24 )
A__ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase__ )
A__ : List[Any] = [mem.copy() for i in range(4 )]
A__ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
A__ : List[str] = Text('''GPU''' , font_size=24 )
A__ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase__ )
A__ : Optional[Any] = [mem.copy() for i in range(6 )]
A__ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
A__ : Dict = Text('''Model''' , font_size=24 )
A__ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ )
model.move_to([3, -1.0, 0] )
self.add(lowercase__ )
A__ : Dict = []
for i, rect in enumerate(lowercase__ ):
rect.set_stroke(lowercase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
A__ : List[str] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowercase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 )
self.add(lowercase__ )
cpu_targs.append(lowercase__ )
A__ : List[str] = [mem.copy() for i in range(6 )]
A__ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 )
A__ : str = Text('''Loaded Checkpoint''' , font_size=24 )
A__ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
A__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ : Union[str, Any] = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase__ , lowercase__ )
A__ : List[Any] = MarkupText(
F"<span fgcolor=\'{BLUE}\'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
A__ : List[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase__ ) , Write(lowercase__ ) )
self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) )
A__ : Optional[int] = []
A__ : List[str] = []
for i, rect in enumerate(lowercase__ ):
A__ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 )
target.move_to(lowercase__ )
first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) )
A__ : List[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) )
self.play(*lowercase__ )
self.play(*lowercase__ )
self.wait()
| 704
|
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma**2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
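# --- Quick check of the density above: at x == mu the standard normal pdf is
# 1 / sqrt(2 * pi), approximately 0.3989.
print(round(gaussian(0), 4))  # 0.3989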
| 55
| 0
|
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(F"1 / {pow(temp + 1, int(power))}" if series else '''1''')
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE : int = int(input('Enter the last number (nth term) of the P-Series'))
_SCREAMING_SNAKE_CASE : Optional[int] = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
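# --- Example of the series construction above: the first four terms for p = 2.
print(p_series(4, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16']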
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
| 0
|
from __future__ import annotations
_SCREAMING_SNAKE_CASE : Any = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = graph
# mapping node to its parent in resulting breadth first tree
A__ : Dict = {}
A__ : Tuple = source_vertex
def __snake_case ( self ):
A__ : Optional[int] = {self.source_vertex}
A__ : str = None
A__ : Dict = [self.source_vertex] # first in first out queue
while queue:
A__ : Optional[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(UpperCamelCase__ )
A__ : Union[str, Any] = vertex
queue.append(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
if target_vertex == self.source_vertex:
return self.source_vertex
A__ : str = self.parent.get(UpperCamelCase__ )
if target_vertex_parent is None:
A__ : Tuple = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(UpperCamelCase__ )
return self.shortest_path(UpperCamelCase__ ) + F"->{target_vertex}"
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
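# --- Sketch of the parent map breath_first_search builds for the graph above
# with source 'G' (worked out by hand from the adjacency lists), plus the
# pointer walk shortest_path performs on it. The helper name is illustrative.
parent = {"C": "G", "A": "C", "F": "C", "B": "A", "E": "A", "D": "B"}

def rebuild_path(target: str, source: str = "G") -> str:
    # Walk parent pointers back to the source, then reverse into 'G->...->target'.
    path = [target]
    while path[-1] != source:
        path.append(parent[path[-1]])
    return "->".join(reversed(path))

print(rebuild_path("D"))  # G->C->A->B->D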
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 0
|
from math import log2


def lowest_set_bit_index(a: int) -> int:
    """Return the 0-based index of the lowest set bit of `a` (0 if a == 0)."""
    if a < 0:
        raise ValueError('''Input value must be a positive integer''')
    elif isinstance(a, float):
        raise TypeError('''Input value must be a \'int\' type''')
    return 0 if (a == 0) else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
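# --- Why `a & -a` isolates the lowest set bit: in two's complement, -a flips
# every bit above the lowest 1 and keeps that 1, so the AND leaves exactly it.
for a in (1, 2, 12, 80):
    print(a, a & -a)  # 1 1 / 2 2 / 12 4 / 80 16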
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
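# --- Minimal concrete-command sketch for the abstract base above; the class
# name, flag, and wiring here are illustrative, not taken from the source.
class EchoCommand:  # in practice this would subclass the abstract base above
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        parser.add_argument("--message", type=str, default="hi")

    def __init__(self, message: str) -> None:
        self.message = message

    def run(self) -> None:
        print(self.message)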
| 55
| 0
|
from __future__ import annotations
_SCREAMING_SNAKE_CASE : Tuple = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = graph
# mapping node to its parent in resulting breadth first tree
A__ : Tuple = {}
A__ : Union[str, Any] = source_vertex
def __snake_case ( self ):
A__ : List[Any] = {self.source_vertex}
A__ : Dict = None
A__ : Optional[int] = [self.source_vertex] # first in first out queue
while queue:
A__ : str = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_snake_case )
A__ : Union[str, Any] = vertex
queue.append(_snake_case )
def __snake_case ( self , UpperCamelCase__ ):
if target_vertex == self.source_vertex:
return self.source_vertex
A__ : Optional[Any] = self.parent.get(_snake_case )
if target_vertex_parent is None:
A__ : List[str] = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(_snake_case )
return self.shortest_path(_snake_case ) + F"->{target_vertex}"
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[Any] = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
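        # e.g. with the defaults above (image_size=[30, 30], patch_size=2,
        # num_detection_tokens=10): num_patches = 15 * 15 = 225, so the
        # expected sequence length is 225 + 1 + 10 = 236.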
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
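        # Note (an addition): post_process_object_detection rescales the
        # model's normalized (center_x, center_y, width, height) pred_boxes to
        # absolute (x0, y0, x1, y1) pixel coordinates via target_sizes, which
        # is why the expected box checked above is expressed in pixels.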
| 55
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> str:
"""simple docstring"""
A__ : Dict = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
A__ : Any = MaskFormerConfig(backbone_config=__UpperCamelCase )
A__ : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
A__ : Optional[int] = 8_47
A__ : Optional[int] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
A__ : List[Any] = 1_50
A__ : Union[str, Any] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
A__ : Any = 1_71
A__ : Dict = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
A__ : int = 1_33
A__ : Any = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
A__ : Tuple = 19
A__ : Union[str, Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
A__ : Dict = 65
A__ : int = '''mapillary-vistas-id2label.json'''
A__ : Optional[int] = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
A__ : Tuple = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
A__ : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int ) -> str:
"""simple docstring"""
A__ : List[Any] = dct.pop(__UpperCamelCase )
A__ : List[str] = val
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
A__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A__ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A__ : Union[str, Any] = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
A__ : Dict = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Optional[int] = in_proj_weight[:dim, :]
A__ : str = in_proj_bias[: dim]
A__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
A__ : Any = in_proj_bias[
dim : dim * 2
]
A__ : int = in_proj_weight[
-dim :, :
]
A__ : Any = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
A__ : Optional[int] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
A__ : List[str] = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : List[Any] = in_proj_weight[: hidden_size, :]
A__ : Tuple = in_proj_bias[:config.hidden_size]
A__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
A__ : Dict = in_proj_bias[hidden_size : hidden_size * 2]
A__ : Optional[int] = in_proj_weight[-hidden_size :, :]
A__ : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
A__ : str = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
A__ : Tuple = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Any = in_proj_weight[: hidden_size, :]
A__ : Tuple = in_proj_bias[:config.hidden_size]
A__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
A__ : int = in_proj_bias[hidden_size : hidden_size * 2]
A__ : Optional[Any] = in_proj_weight[-hidden_size :, :]
A__ : Any = in_proj_bias[-hidden_size :]
# fmt: on
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ : List[str] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] = False ) -> Optional[int]:
"""simple docstring"""
A__ : str = get_maskformer_config(__UpperCamelCase )
# load original state_dict
with open(__UpperCamelCase , '''rb''' ) as f:
A__ : str = pickle.load(__UpperCamelCase )
A__ : List[str] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
A__ : Optional[Any] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_swin_q_k_v(__UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCamelCase , __UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
A__ : Tuple = torch.from_numpy(__UpperCamelCase )
# load 🤗 model
A__ : Any = MaskFormerForInstanceSegmentation(__UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(__UpperCamelCase , param.shape )
A__ , A__ : Dict = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__UpperCamelCase ) == 0, F"Unexpected keys: {unexpected_keys}"
# verify results
A__ : List[str] = prepare_img()
if "vistas" in model_name:
A__ : Any = 65
elif "cityscapes" in model_name:
A__ : Dict = 6_55_35
else:
A__ : Dict = 2_55
A__ : Optional[Any] = True if '''ade''' in model_name else False
A__ : Optional[int] = MaskFormerImageProcessor(ignore_index=__UpperCamelCase , reduce_labels=__UpperCamelCase )
A__ : Any = image_processor(__UpperCamelCase , return_tensors='''pt''' )
A__ : Union[str, Any] = model(**__UpperCamelCase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
A__ : Optional[int] = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"nielsr/{model_name}" )
image_processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
    help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 709
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
    if n == 1 or not isinstance(n , int ):
return 0
elif n == 2:
return 1
else:
A__ : Any = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
A__ : Dict = 0
A__ : Optional[int] = 2
while digits < n:
index += 1
A__ : Dict = len(str(fibonacci(__UpperCamelCase ) ) )
return index
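# A faster alternative sketch (an addition): the helper above rebuilds the
# whole sequence on every call, so the index search is quadratic overall;
# walking the sequence once (with F(1) = F(2) = 1) keeps it linear.
def _fibonacci_digits_index_linear(n: int) -> int:
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
assert _fibonacci_digits_index_linear(3) == 12  # F(12) = 144 is the first 3-digit term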
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10_00 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__UpperCamelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
    A__ : List[str] = argparse.ArgumentParser(add_help=False , allow_abbrev=False )
    # The main config parser
    A__ : Union[str, Any] = config_command_parser(__UpperCamelCase )
    # The subparser to add commands to
    A__ : str = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
    # Then add other parsers with the parent parser
    default_command_parser(subcommands , parents=[parent_parser] )
    update_command_parser(subcommands , parents=[parent_parser] )
    return config_parser
def SCREAMING_SNAKE_CASE ( ) -> str:
    """simple docstring"""
    A__ : Optional[int] = get_config_parser()
    A__ : Union[str, Any] = config_parser.parse_args()
    if not hasattr(args , '''func''' ):
        config_parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 710
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
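# A naive reference sketch (an addition) for sanity-checking small inputs,
# assuming (as the add/compute helpers suggest) that the solver above
# accelerates the digit-sum sequence a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)).
def _solution_naive(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a
assert [_solution_naive(k) for k in range(1, 7)] == [1, 2, 4, 8, 16, 23]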
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 0
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_SCREAMING_SNAKE_CASE : List[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_SCREAMING_SNAKE_CASE : Tuple = typing.Union[np.floataa, int, float] # noqa: UP007
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Vector , __UpperCamelCase : Vector ) -> float:
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(__UpperCamelCase ) - np.asarray(__UpperCamelCase )) ** 2 ) )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Vector , __UpperCamelCase : Vector ) -> float:
    """simple docstring"""
    return sum((a - b) ** 2 for a, b in zip(__UpperCamelCase , __UpperCamelCase ) ) ** (1 / 2)
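# An equivalent idiomatic spelling (an addition): both functions above compute
# the L2 norm of the difference vector, which NumPy exposes directly.
def _euclidean_distance_norm(va, vb) -> float:
    return float(np.linalg.norm(np.asarray(va) - np.asarray(vb)))
assert np.isclose(_euclidean_distance_norm([1, 2, 3], [4, 5, 6]), 27 ** 0.5)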
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
benchmark()
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
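# For example RUN_SLOW=yes/true/on/1 parses to True and RUN_SLOW=no/false/off/0
# to False, while an unset key falls back to `default` (a clarifying note added
# here; these are the string values distutils' strtobool accepts).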
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
from collections import defaultdict
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
A__ : List[Any] = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
return ret
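# A self-contained reconstruction sketch (an addition): compute subtree sizes
# with a parent-aware DFS; every non-root subtree of even size can be cut
# away, which avoids the module-level `visited`/`cuts` state used above. For
# the sample edges in the __main__ block below this returns 2, matching
# len(cuts) - 1.
def _count_even_cuts(tree: dict, root: int) -> int:
    even_cuts = 0

    def _subtree_size(node: int, parent: int) -> int:
        nonlocal even_cuts
        size = 1
        for child in tree[node]:
            if child != parent:
                size += _subtree_size(child, node)
        if node != root and size % 2 == 0:
            even_cuts += 1
        return size

    _subtree_size(root, 0)
    return even_cuts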
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = 1_0, 9
_SCREAMING_SNAKE_CASE : Tuple = defaultdict(list)
_SCREAMING_SNAKE_CASE : dict[int, bool] = {}
_SCREAMING_SNAKE_CASE : list[int] = []
_SCREAMING_SNAKE_CASE : int = 0
_SCREAMING_SNAKE_CASE : str = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 712
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
        A__ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
        A__ : List[Any] = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
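# A worked micro-example (an addition) of the bifid transposition implemented
# above, using the 5x5 square: "abc" has rows (1, 1, 1) and columns (1, 2, 3);
# flattening rows-then-columns gives 1 1 1 1 2 3, which re-pairs to
# (1, 1), (1, 1), (2, 3), i.e. the ciphertext "aah".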
| 55
| 0
|
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = 'microsoft/speecht5_tts'
_lowerCAmelCase = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
_lowerCAmelCase = 'text_reader'
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ['text']
_lowerCAmelCase = ['audio']
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
        A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=True )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[str] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : List[str] = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
            return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
            return self.post_processor(UpperCamelCase__ ).cpu().detach()
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_SCREAMING_SNAKE_CASE : int = TypeVar('T')
class UpperCamelCase__ ( Generic[T] ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = None
        A__ : int = len(UpperCamelCase__ )
A__ : Optional[int] = [any_type for _ in range(self.N )] + arr
A__ : Optional[Any] = fnc
self.build()
def __snake_case ( self ):
for p in range(self.N - 1 , 0 , -1 ):
A__ : Any = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
p += self.N
A__ : Tuple = v
while p > 1:
A__ : str = p // 2
A__ : Dict = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ): # noqa: E741
A__ , A__ : Dict = l + self.N, r + self.N
A__ : List[Any] = None
while l <= r:
if l % 2 == 1:
                A__ : Tuple = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                A__ : Dict = self.st[r] if res is None else self.fn(res , self.st[r] )
A__ , A__ : Optional[Any] = (l + 1) // 2, (r - 1) // 2
return res
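# Note (an addition): the range query above walks l and r upward through the
# implicit binary heap in self.st. An odd l is a right child whose parent also
# covers indices left of the range (symmetrically, an even r is a left child
# whose parent spills to the right), so those nodes are folded into the result
# immediately, which keeps each query O(log n).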
if __name__ == "__main__":
from functools import reduce
_SCREAMING_SNAKE_CASE : Tuple = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
_SCREAMING_SNAKE_CASE : Tuple = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
_SCREAMING_SNAKE_CASE : List[str] = SegmentTree(test_array, min)
_SCREAMING_SNAKE_CASE : int = SegmentTree(test_array, max)
_SCREAMING_SNAKE_CASE : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            A__ : str = reduce(min , test_array[i : j + 1] )
            A__ : int = reduce(max , test_array[i : j + 1] )
            A__ : str = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
_SCREAMING_SNAKE_CASE : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
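# Hedged aside (not part of the original script): the `pad_to_multiple_of`
# logic in `collate_fn` exists because fp16/bf16 matmuls hit fast tensor-core
# kernels when sequence lengths are multiples of 8 (16 for fp8). Illustrative
# shape check, assuming a `tokenizer` and a list of `features`:
#
#     batch = tokenizer.pad(features, padding="longest", pad_to_multiple_of=8, return_tensors="pt")
#     assert batch["input_ids"].shape[1] % 8 == 0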
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : Dict = model(**__UpperCamelCase )
A__ : Dict = outputs.loss
A__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
A__ : Optional[int] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Union[str, Any] = model(**__UpperCamelCase )
A__ : int = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
A__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
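# Minimal sketch of the `Accelerator.gather_for_metrics` alternative mentioned
# in the "New Code" comments above (hedged: variable names are illustrative).
# It drops the duplicated samples of the last distributed batch automatically,
# replacing the manual `samples_seen` bookkeeping:
#
#     for batch in eval_dataloader:
#         with torch.no_grad():
#             logits = model(**batch).logits
#         preds, refs = accelerator.gather_for_metrics((logits.argmax(dim=-1), batch["labels"]))
#         metric.add_batch(predictions=preds, references=refs)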
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Dict = parser.parse_args()
A__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
_SCREAMING_SNAKE_CASE : str = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : int ) -> str:
"""simple docstring"""
    A__ : str = [False] * len(graph )
A__ : Optional[Any] = [s]
A__ : int = True
while queue:
A__ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
A__ : Optional[int] = True
A__ : Tuple = u
return visited[t]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
    A__ : Dict = [-1] * (len(graph ))
A__ : Union[str, Any] = 0
A__ : Optional[Any] = []
A__ : str = [i[:] for i in graph] # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
A__ : Tuple = float('''Inf''' )
A__ : Union[str, Any] = sink
while s != source:
# Find the minimum value in select path
            A__ : Dict = min(path_flow , graph[parent[s]][s] )
A__ : Optional[int] = parent[s]
max_flow += path_flow
A__ : Tuple = sink
while v != source:
A__ : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
A__ : Tuple = parent[v]
    for i in range(len(graph ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
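# Hedged sanity check (comments only; the function mutates the graph it is
# given, so capacities must be copied first). For the classic 6-node example
# below the max flow is 23, and the saturated edges returned above should sum
# to that value:
#
#     original = [row[:] for row in test_graph]
#     cut_edges = mincut(test_graph, source=0, sink=5)
#     assert sum(original[i][j] for i, j in cut_edges) == 23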
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=UpperCamelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : Dict = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.post_processor(UpperCamelCase__ ).cpu().detach()
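# Hedged usage sketch (comments only; assumes the `transformers` agents API,
# where this tool is registered under a task name such as "text-to-speech"):
#
#     from transformers import load_tool
#     tts = load_tool("text-to-speech")
#     waveform = tts("Hello world")  # 1-D float tensor, 16 kHz for SpeechT5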
| 55
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = 'https://openaipublic.azureedge.net/jukebox/models/'
_SCREAMING_SNAKE_CASE : Dict = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
A__ : Union[str, Any] = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
A__ : Optional[int] = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
A__ : Optional[Any] = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
A__ : Dict = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
A__ : Optional[int] = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
A__ : int = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A__ : Optional[Any] = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
A__ : int = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
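# Illustrative traces of the renaming above (comments only; the left-hand keys
# are hypothetical examples in the upstream checkpoint's naming scheme):
#   "prime_state_ln.weight"      -> "encoder.final_layer_norm.weight"
#   "vqvae.bottleneck.level_0.k" -> "vqvae.bottleneck.level_0.codebook"
#   "y_emb.weight"               -> "metadata_embedding.weight"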
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A__ : str = {}
import re
A__ : Any = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
A__ : Union[str, Any] = re.compile(
R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
A__ : Dict = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
A__ : Union[str, Any] = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
A__ : List[Any] = re.compile(
R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
A__ : Any = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
A__ : Tuple = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
A__ : List[str] = re.compile(
R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
A__ : str = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__UpperCamelCase ):
A__ : Optional[Any] = re_encoder_block_conv_in.match(__UpperCamelCase )
A__ : Optional[int] = regex_match.groups()
A__ : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
A__ : Dict = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
A__ : Dict = re_encoder_block_conv_in.sub(__UpperCamelCase , __UpperCamelCase )
elif re_encoder_block_resnet.fullmatch(__UpperCamelCase ):
A__ : str = re_encoder_block_resnet.match(__UpperCamelCase )
A__ : Tuple = regex_match.groups()
A__ : List[str] = int(groups[2] ) * 2 + int(groups[3] )
A__ : Optional[Any] = {'1': 1, '3': 2}[groups[-2]]
A__ : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
A__ : List[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
A__ : Tuple = prefix + resnet_block
A__ : Optional[Any] = re_encoder_block_resnet.sub(__UpperCamelCase , __UpperCamelCase )
elif re_encoder_block_proj_out.fullmatch(__UpperCamelCase ):
A__ : Tuple = re_encoder_block_proj_out.match(__UpperCamelCase )
A__ : Optional[Any] = regex_match.groups()
A__ : Optional[int] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
A__ : Union[str, Any] = re_encoder_block_proj_out.sub(__UpperCamelCase , __UpperCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__UpperCamelCase ):
A__ : Dict = re_decoder_block_conv_out.match(__UpperCamelCase )
A__ : Union[str, Any] = regex_match.groups()
A__ : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
A__ : List[str] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
A__ : str = re_decoder_block_conv_out.sub(__UpperCamelCase , __UpperCamelCase )
elif re_decoder_block_resnet.fullmatch(__UpperCamelCase ):
A__ : Any = re_decoder_block_resnet.match(__UpperCamelCase )
A__ : int = regex_match.groups()
A__ : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
A__ : int = {'1': 1, '3': 2}[groups[-2]]
A__ : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
A__ : Dict = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
A__ : Union[str, Any] = prefix + resnet_block
A__ : Any = re_decoder_block_resnet.sub(__UpperCamelCase , __UpperCamelCase )
elif re_decoder_block_proj_in.fullmatch(__UpperCamelCase ):
A__ : int = re_decoder_block_proj_in.match(__UpperCamelCase )
A__ : Optional[Any] = regex_match.groups()
A__ : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
A__ : Optional[int] = re_decoder_block_proj_in.sub(__UpperCamelCase , __UpperCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__UpperCamelCase ):
A__ : str = re_prior_cond_conv_out.match(__UpperCamelCase )
A__ : Optional[int] = regex_match.groups()
A__ : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
A__ : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
A__ : Tuple = re_prior_cond_conv_out.sub(__UpperCamelCase , __UpperCamelCase )
elif re_prior_cond_resnet.fullmatch(__UpperCamelCase ):
A__ : Dict = re_prior_cond_resnet.match(__UpperCamelCase )
A__ : Optional[int] = regex_match.groups()
A__ : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
A__ : Any = {'1': 1, '3': 2}[groups[-2]]
A__ : Any = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
A__ : List[str] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
A__ : int = prefix + resnet_block
A__ : List[str] = re_prior_cond_resnet.sub(__UpperCamelCase , __UpperCamelCase )
elif re_prior_cond_proj_in.fullmatch(__UpperCamelCase ):
A__ : Optional[Any] = re_prior_cond_proj_in.match(__UpperCamelCase )
A__ : str = regex_match.groups()
A__ : Optional[Any] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
A__ : Tuple = re_prior_cond_proj_in.sub(__UpperCamelCase , __UpperCamelCase )
# keep original key
else:
A__ : int = original_key
A__ : Optional[int] = replace_key(__UpperCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            A__ : str = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
A__ : List[str] = original_key
A__ : List[Any] = original_key
A__ : Union[str, Any] = value
return new_dict
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str=None , __UpperCamelCase : int=None ) -> str:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
A__ : Union[str, Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=__UpperCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=__UpperCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , '''wb''' ).write(r.content )
A__ : Union[str, Any] = MODEL_MAPPING[model_name.split('''/''' )[-1]]
A__ : Any = JukeboxConfig.from_pretrained(__UpperCamelCase )
A__ : Optional[Any] = JukeboxModel(__UpperCamelCase )
A__ : Optional[int] = []
A__ : Union[str, Any] = {}
for i, dict_name in enumerate(__UpperCamelCase ):
A__ : List[Any] = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
A__ : List[Any] = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
A__ : Any = old_dic[k]
elif k.endswith('''.w''' ):
A__ : List[Any] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A__ : str = old_dic[k]
else:
A__ : List[Any] = old_dic[k]
A__ : Union[str, Any] = 'vqvae' if i == 0 else F"priors.{3 - i}"
A__ : Any = fix_jukebox_keys(__UpperCamelCase , model.state_dict() , __UpperCamelCase , __UpperCamelCase )
weight_dict.append(__UpperCamelCase )
A__ : Any = weight_dict.pop(0 )
model.vqvae.load_state_dict(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , '''w''' ) as txtfile:
json.dump(__UpperCamelCase , __UpperCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCamelCase )
return weight_dict
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
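# Hedged usage sketch (not part of the original file); the checkpoint name
# comes from the pretrained map above, and the round-trip holds for plain
# ASCII text:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    _tok = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    _ids = _tok('''Hello world''' ).input_ids
    assert _tok.decode(_ids ) == '''Hello world'''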
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A__ : List[str] = 0
for ch in input_str:
        A__ : Any = ord(ch )
        A__ : List[str] = pow(2 , ch_unicode )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
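# Worked micro-example (comments only): for "ab", ord('a') == 97 turns on bit
# 97 of the bitmap and ord('b') == 98 turns on bit 98, so the function returns
# True; for "aa" the second 'a' finds bit 97 already set and returns False.
# Duplicate detection therefore runs in O(n) time with a single integer of state.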
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
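# Hedged usage sketch (comments only; assumes the upstream class name
# `DeformableDetrConfig`). The constructor check above means:
#
#     DeformableDetrConfig(two_stage=True, with_box_refine=True)   # ok
#     DeformableDetrConfig(two_stage=True, with_box_refine=False)  # raises ValueError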
| 55
| 0
|
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
A__ : Optional[Any] = val
A__ : Any = None
A__ : Optional[Any] = None
def __snake_case ( self , UpperCamelCase__ ):
if self.val:
if val < self.val:
                if self.left is None:
                    A__ : Any = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    A__ : str = Node(val )
                else:
                    self.right.insert(val )
else:
A__ : Tuple = val
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ) -> Tuple:
"""simple docstring"""
if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
    if len(arr ) == 0:
        return arr
    A__ : Optional[int] = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    A__ : Tuple = []
    inorder(root , res )
return res
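# Hedged behavioural note (not part of the original file): the final `else`
# branch of `insert` overwrites an equal value instead of attaching a new node,
# so duplicates are silently dropped, e.g. tree_sort([3, 1, 3]) returns [1, 3].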
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 718
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
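# Worked micro-example (comments only): for sorted halves p = [3, 5] and
# q = [1, 2], the merge sees 3 > 1 and adds len(p) - 0 = 2 inversions, then
# 3 > 2 and adds 2 more, counting the four cross pairs (3,1), (3,2), (5,1), (5,2).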
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE ( ) -> Any:
"""simple docstring"""
    A__ : Any = HfArgumentParser(TensorFlowBenchmarkArguments )
A__ : List[Any] = parser.parse_args_into_dataclasses()[0]
    A__ : str = TensorFlowBenchmark(args=benchmark_args )
try:
A__ : str = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
A__ : List[Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        A__ : Tuple = """ """.join(str(e ).split(''' ''' )[:-1] )
A__ : str = """"""
        A__ : Tuple = eval(str(e ).split(''' ''' )[-1] )
A__ : int = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            A__ : str = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 719
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
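# Hedged aside (not part of the original file): `brightness` simplifies to
# c + level, and `Image.point` clamps the result to the 0-255 range of 8-bit
# modes, so level=100 maps pixel value 50 -> 150 and 200 -> 255.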
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 55
| 0
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
for attribute in key.split('''.''' ):
        A__ : int = getattr(hf_pointer , attribute )
if weight_type is not None:
        A__ : List[str] = getattr(hf_pointer , weight_type ).shape
else:
A__ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
A__ : List[Any] = value
elif weight_type == "weight_g":
A__ : int = value
elif weight_type == "weight_v":
A__ : Optional[int] = value
elif weight_type == "bias":
A__ : Tuple = value
else:
A__ : Union[str, Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict = []
A__ : str = fairseq_model.state_dict()
A__ : Optional[Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
A__ : int = False
if "conv_layers" in name:
load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
A__ : Any = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A__ : str = True
if "*" in mapped_key:
                        A__ : Any = name.split(key )[0].split('''.''' )[-2]
                        A__ : str = mapped_key.replace('''*''' , layer_index )
if "weight_g" in name:
A__ : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
A__ : List[str] = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
A__ : List[str] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ : Optional[int] = '''weight'''
else:
A__ : int = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
            unused_weights.append(name )
logger.warning(F"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
A__ : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
A__ : Tuple = name.split('''.''' )
A__ : int = int(items[0] )
A__ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
A__ : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
A__ : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
A__ : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
A__ : Optional[int] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : List[str]=None ) -> str:
"""simple docstring"""
    A__ : Dict = torch.load(checkpoint_path )
    A__ : Optional[Any] = WavLMConfigOrig(checkpoint['''cfg'''] )
    A__ : int = WavLMOrig(cfg )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
        A__ : Tuple = WavLMConfig.from_pretrained(config_path )
else:
A__ : Dict = WavLMConfig()
    A__ : Optional[int] = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
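# Hedged CLI sketch using the arguments declared below (paths are placeholders,
# not real checkpoints):
#
#     python convert_wavlm_checkpoint.py --checkpoint_path WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base --config_path ./config.json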
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 0
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
_SCREAMING_SNAKE_CASE : List[str] = None
try:
import msvcrt
except ImportError:
_SCREAMING_SNAKE_CASE : Optional[Any] = None
try:
import fcntl
except ImportError:
_SCREAMING_SNAKE_CASE : List[str] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
_SCREAMING_SNAKE_CASE : Tuple = OSError
# Data
# ------------------------------------------------
_SCREAMING_SNAKE_CASE : Optional[int] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
_SCREAMING_SNAKE_CASE : List[str] = """3.0.12"""
_SCREAMING_SNAKE_CASE : Tuple = None
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
global _logger
A__ : Dict = _logger or logging.getLogger(__name__ )
return _logger
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
A__ : List[str] = lock_file
return None
def __str__( self ):
A__ : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ ):
A__ : Union[str, Any] = lock
return None
def __enter__( self ):
return self.lock
def __exit__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
self.lock.release()
return None
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=-1 , UpperCamelCase__=None ):
A__ : Any = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
A__ : Tuple = self.hash_filename_if_too_long(UpperCamelCase__ , UpperCamelCase__ )
# The path to the lock file.
A__ : Any = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
A__ : Union[str, Any] = None
# The default timeout value.
A__ : List[str] = timeout
# We use this lock primarily for the lock counter.
A__ : Dict = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
A__ : List[Any] = 0
return None
@property
def __snake_case ( self ):
return self._lock_file
@property
def __snake_case ( self ):
return self._timeout
@timeout.setter
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = float(UpperCamelCase__ )
return None
def __snake_case ( self ):
raise NotImplementedError()
def __snake_case ( self ):
raise NotImplementedError()
@property
def __snake_case ( self ):
return self._lock_file_fd is not None
def __snake_case ( self , UpperCamelCase__=None , UpperCamelCase__=0.0_5 ):
if timeout is None:
A__ : Optional[Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
A__ : Any = id(self )
A__ : Optional[int] = self._lock_file
A__ : str = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(UpperCamelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
A__ : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __snake_case ( self , UpperCamelCase__=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
A__ : Optional[int] = id(self )
A__ : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
A__ : Dict = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self ):
self.acquire()
return self
def __exit__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
self.release()
return None
def __del__( self ):
self.release(force=UpperCamelCase__ )
return None
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = os.path.basename(UpperCamelCase__ )
if len(UpperCamelCase__ ) > max_length and max_length > 0:
A__ : List[str] = os.path.dirname(UpperCamelCase__ )
A__ : Optional[Any] = str(hash(UpperCamelCase__ ) )
A__ : str = filename[: max_length - len(UpperCamelCase__ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(UpperCamelCase__ , UpperCamelCase__ )
else:
return path
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=-1 , UpperCamelCase__=None ):
from .file_utils import relative_to_absolute_path
super().__init__(UpperCamelCase__ , timeout=UpperCamelCase__ , max_filename_length=UpperCamelCase__ )
A__ : Any = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def __snake_case ( self ):
A__ : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
A__ : Union[str, Any] = os.open(self._lock_file , UpperCamelCase__ )
except OSError:
pass
else:
try:
msvcrt.locking(UpperCamelCase__ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(UpperCamelCase__ )
else:
A__ : Dict = fd
return None
def __snake_case ( self ):
A__ : Optional[int] = self._lock_file_fd
A__ : str = None
msvcrt.locking(UpperCamelCase__ , msvcrt.LK_UNLCK , 1 )
os.close(UpperCamelCase__ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=-1 , UpperCamelCase__=None ):
A__ : int = os.statvfs(os.path.dirname(UpperCamelCase__ ) ).f_namemax
super().__init__(UpperCamelCase__ , timeout=UpperCamelCase__ , max_filename_length=UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
A__ : List[str] = os.open(self._lock_file , UpperCamelCase__ )
try:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(UpperCamelCase__ )
else:
A__ : Union[str, Any] = fd
return None
def __snake_case ( self ):
A__ : str = self._lock_file_fd
A__ : int = None
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_UN )
os.close(UpperCamelCase__ )
return None
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __snake_case ( self ):
A__ : List[Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
A__ : Any = os.open(self._lock_file , UpperCamelCase__ )
except OSError:
pass
else:
A__ : Optional[Any] = fd
return None
def __snake_case ( self ):
os.close(self._lock_file_fd )
A__ : List[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
_SCREAMING_SNAKE_CASE : str = None
if msvcrt:
_SCREAMING_SNAKE_CASE : int = WindowsFileLock
elif fcntl:
_SCREAMING_SNAKE_CASE : Tuple = UnixFileLock
else:
_SCREAMING_SNAKE_CASE : List[str] = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
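# Hedged usage sketch (comments only; `FileLock` and `Timeout` are the aliases
# declared in __all__ above):
#
#     lock = FileLock("demo.txt.lock", timeout=1)
#     try:
#         with lock:
#             ...  # exclusive section; acquire() is re-entrant via the lock counter
#     except Timeout:
#         ...  # another process held the lock for longer than 1 second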
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
import re
def SCREAMING_SNAKE_CASE ( dna : str ) -> str:
    """simple docstring"""
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
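    # Usage sketch (illustration, not part of the original row): the function
    # returns the complementary strand and rejects any base outside ATCG with
    # ValueError('Invalid Strand').
    assert SCREAMING_SNAKE_CASE('''ATCG''' ) == '''TAGC'''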
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( PretrainedConfig ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['''esmfold_config'''] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output['''trunk'''] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
                F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
    def to_dict( self ):
        output = asdict(self )
        output['''structure_module'''] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
    def to_dict( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
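# Worked check of the trunk validation above (illustration, not from the
# original row): with the defaults sequence_state_dim=1024 and
# sequence_head_width=32 there are 1024 // 32 = 32 sequence heads and
# 32 * 32 == 1024, so the config validates; a sequence_head_width of 48 would
# raise, since 1024 is not a round multiple of 48.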
| 55
| 0
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : Union[str, Any] = TypeVar('T')
class LRUCache ( Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
def __repr__( self ):
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
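    # Trace of the demo above (illustration): after refer('A'), refer(2),
    # refer(3) the deque is [3, 2, 'A']; refer('A') moves the existing key to
    # the front (['A', 3, 2]); refer(4) fills the cache ([4, 'A', 3, 2]);
    # refer(5) evicts the least recently used key 2 from the right, which is
    # why the final state asserted above is [5, 4, 'A', 3].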
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset ( IterableDataset ):
'''simple docstring'''
    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
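# Illustration of the chunking above (toy numbers, not from the original row):
# with seq_length=4 and two tokenized texts [1, 2, 3] and [4, 5], the
# concatenated stream is [1, 2, 3, <bos>, 4, 5, <bos>]; the first window
# [1, 2, 3, <bos>] is yielded as a tensor and the ragged tail of length 3 is
# dropped, so every example has exactly seq_length tokens.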
def create_dataloader ( args ) -> DataLoader:
    """simple docstring"""
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate ( args ) -> tuple:
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
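# Note on the metric (illustration): perplexity is exp(mean cross-entropy), so
# a mean loss of 2.0 nats corresponds to a perplexity of e**2 ≈ 7.39; the
# OverflowError guard is meant to map a divergent loss to float('inf').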
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( bit_count : int ) -> list:
    """simple docstring"""
    if bit_count < 0:
        raise ValueError('''The given input must be non-negative''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count : int ) -> list:
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append('''0''' + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append('''1''' + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
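    # Worked example (illustration): for two bits the string sequence is built
    # from ["0", "1"] by prefixing '0' over the first half and '1' over the
    # reversed second half; adjacent codes differ in exactly one bit.
    assert gray_code_sequence_string(2 ) == ["00", "01", "11", "10"]
    assert SCREAMING_SNAKE_CASE(2 ) == [0, 1, 3, 2]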
| 702
|
def solution ( ) -> str:
    """simple docstring"""
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
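    # Equivalent formulation (illustration; ``MOD`` is a name introduced here,
    # not part of the original row): the last ten digits can be accumulated
    # without big-integer growth by summing modulo 10**10.
    MOD = 10**10
    assert solution() == str(sum(pow(i , i , MOD ) for i in range(1 , 10_01 )) % MOD ).zfill(10 )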
| 55
| 0
|
from collections import defaultdict
from math import ceil, sqrt
def solution ( t_limit : int = 1_00_00_00 , t_max : int = 10 ) -> int:
    """simple docstring"""
    count : defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= t_max )
if __name__ == "__main__":
print(f"""{solution() = }""")
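    # Sketch of the counting idea (illustration): a square lamina with outer
    # side o and hole side h (same parity, 1 <= h <= o - 2) uses o*o - h*h
    # tiles, so `count` maps each tile total to the number of (o, h) pairs
    # producing it, and the answer counts totals achievable in 1..t_max ways.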
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 0
|
import re
def SCREAMING_SNAKE_CASE ( dna : str ) -> str:
    """simple docstring"""
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
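    # Quick check (illustration): with the defaults mu=0.0 and sigma=1.0, the
    # standard normal density at x = 0 is 1 / sqrt(2 * pi) ≈ 0.3989.
    assert abs(SCREAMING_SNAKE_CASE(0 ) - 0.3989 ) < 1e-4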
| 55
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
_SCREAMING_SNAKE_CASE : Optional[Any] = '''</w>'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''@@ '''
def get_pairs ( word : tuple ) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
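# Usage sketch (illustration, not part of the original row): BPE operates on
# words represented as symbol tuples, e.g.
#
#     >>> sorted(get_pairs(('h', 'e', 'l', 'l', 'o') ) )
#     [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]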
# Speech2Text2 has no max input length
_SCREAMING_SNAKE_CASE : int = {'''facebook/s2t-wav2vec2-large-en-de''': 1_0_2_4}
class UpperCamelCase__ ( __a ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , UpperCamelCase__ , UpperCamelCase__="<s>" , UpperCamelCase__="<pad>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__=False , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(
unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , do_lower_case=snake_case__ , **snake_case__ , )
A__ = do_lower_case
with open(snake_case__ , encoding='''utf-8''' ) as vocab_handle:
A__ = json.load(snake_case__ )
A__ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
A__ = None
A__ = None
else:
with open(snake_case__ , encoding='''utf-8''' ) as merges_handle:
A__ = merges_handle.read().split('''\n''' )[:-1]
A__ = [tuple(merge.split()[:2] ) for merge in merges]
A__ = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A__ = {}
@property
def __snake_case ( self ):
return len(self.decoder )
def __snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self , UpperCamelCase__ ):
A__ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
A__ = get_pairs(snake_case__ )
if not pairs:
return token
while True:
            A__ = min(snake_case__ , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ = bigram
A__ = []
A__ = 0
while i < len(snake_case__ ):
try:
A__ = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ = tuple(snake_case__ )
A__ = new_word
if len(snake_case__ ) == 1:
break
else:
A__ = get_pairs(snake_case__ )
A__ = ''' '''.join(snake_case__ )
if word == "\n " + BPE_TOKEN_MERGES:
A__ = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(snake_case__ ):
A__ = word.replace(snake_case__ , '''''' )
A__ = word.replace(''' ''' , snake_case__ )
A__ = word
return word
def __snake_case ( self , UpperCamelCase__ ):
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
'''Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
A__ = text.lower()
A__ = text.split()
A__ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(snake_case__ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self , UpperCamelCase__ ):
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __snake_case ( self , UpperCamelCase__ ):
A__ = self.decoder.get(snake_case__ , self.unk_token )
return result
def __snake_case ( self , UpperCamelCase__ ):
A__ = ''' '''.join(snake_case__ )
# make sure @@ tokens are concatenated
A__ = ''''''.join(string.split(snake_case__ ) )
return string
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if not os.path.isdir(snake_case__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ = os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
A__ = os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '''\n''' )
A__ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
A__ = token_index
writer.write(''' '''.join(snake_case__ ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
| 0
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo ( ctypes.Structure ):
        '''simple docstring'''
        _fields_ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def hide_cursor ( ) -> None:
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''' )
        sys.stdout.flush()
def show_cursor ( ) -> None:
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''' )
        sys.stdout.flush()
@contextmanager
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
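# Minimal usage sketch (illustration; `render_progress` is a hypothetical
# workload, not part of the original row): the context manager hides the
# terminal cursor while a block runs and restores it afterwards, even if the
# block raises.
#
#     with SCREAMING_SNAKE_CASE():
#         render_progress()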
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config ( model_name : str , num_frames : int ) -> XCLIPConfig:
"""simple docstring"""
A__ : Dict = XCLIPTextConfig()
# derive patch size from model name
A__ : Tuple = model_name.find('''patch''' )
A__ : Tuple = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
A__ : Tuple = XCLIPVisionConfig(patch_size=_snake_case , num_frames=_snake_case )
if "large" in model_name:
A__ : List[Any] = 7_68
A__ : Optional[Any] = 30_72
A__ : Optional[Any] = 12
A__ : Dict = 10_24
A__ : Optional[int] = 40_96
A__ : Union[str, Any] = 16
A__ : int = 24
A__ : List[Any] = 7_68
A__ : str = 30_72
if model_name == "xclip-large-patch14-16-frames":
A__ : List[str] = 3_36
A__ : List[Any] = XCLIPConfig.from_text_vision_configs(_snake_case , _snake_case )
if "large" in model_name:
A__ : int = 7_68
return config
def rename_key ( name : str ) -> str:
"""simple docstring"""
if name == "token_embedding.weight":
A__ : Any = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
A__ : int = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
A__ : Optional[int] = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
A__ : Dict = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
A__ : Union[str, Any] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
A__ : Union[str, Any] = name.replace('''c_proj''' , '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
A__ : List[Any] = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
A__ : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' )
if "ln_final" in name:
A__ : Tuple = name.replace('''ln_final''' , '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
A__ : List[str] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
A__ : Union[str, Any] = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
A__ : List[str] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
A__ : int = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
A__ : Union[str, Any] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
A__ : List[str] = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' )
if "visual.proj" in name:
A__ : List[Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' )
if "text_projection" in name:
A__ : Tuple = name.replace('''text_projection''' , '''text_projection.weight''' )
# things on top
if "prompts_visual_proj" in name:
A__ : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
A__ : Any = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' )
# mit
if name == "mit.positional_embedding":
A__ : Dict = name.replace('''positional''' , '''position''' )
if name.startswith('''mit.resblocks''' ):
A__ : Any = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
A__ : Optional[int] = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' )
return name
def convert_state_dict ( orig_state_dict : dict , config : XCLIPConfig ) -> dict:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A__ : Union[str, Any] = orig_state_dict.pop(_snake_case )
if "attn.in_proj" in key:
A__ : List[Any] = key.split('''.''' )
if key.startswith('''visual''' ):
A__ : Union[str, Any] = key_split[3]
A__ : Tuple = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
A__ : int = val[
:dim, :
]
A__ : Optional[Any] = val[
dim : dim * 2, :
]
A__ : Any = val[
-dim:, :
]
else:
A__ : Optional[int] = val[
:dim
]
A__ : Dict = val[
dim : dim * 2
]
A__ : str = val[
-dim:
]
else:
if "weight" in key:
A__ : Tuple = val[
:dim, :
]
A__ : Optional[Any] = val[
dim : dim * 2, :
]
A__ : List[Any] = val[
-dim:, :
]
else:
A__ : str = val[:dim]
A__ : int = val[
dim : dim * 2
]
A__ : Optional[int] = val[-dim:]
elif key.startswith('''mit''' ):
A__ : int = key_split[2]
A__ : Optional[int] = config.vision_config.mit_hidden_size
if "weight" in key:
A__ : str = val[:dim, :]
A__ : Dict = val[dim : dim * 2, :]
A__ : Optional[Any] = val[-dim:, :]
else:
A__ : int = val[:dim]
A__ : List[str] = val[dim : dim * 2]
A__ : Any = val[-dim:]
else:
A__ : Optional[Any] = key_split[2]
A__ : int = config.text_config.hidden_size
if "weight" in key:
A__ : Any = val[:dim, :]
A__ : Optional[Any] = val[
dim : dim * 2, :
]
A__ : Optional[int] = val[-dim:, :]
else:
A__ : Dict = val[:dim]
A__ : int = val[
dim : dim * 2
]
A__ : Tuple = val[-dim:]
else:
A__ : Any = rename_key(_snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
A__ : List[Any] = val.T
A__ : Any = val
return orig_state_dict
def prepare_video ( num_frames : int ) -> list:
"""simple docstring"""
if num_frames == 8:
A__ : List[str] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
A__ : List[str] = '''eating_spaghetti.npy'''
elif num_frames == 32:
A__ : Optional[Any] = '''eating_spaghetti_32_frames.npy'''
A__ : Optional[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=_snake_case , repo_type='''dataset''' , )
A__ : Dict = np.load(_snake_case )
return list(_snake_case )
def convert_xclip_checkpoint ( model_name : str , pytorch_dump_folder_path : str = None , push_to_hub : bool = False ) -> None:
"""simple docstring"""
A__ : Any = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
A__ : Optional[int] = model_to_url[model_name]
A__ : str = 8
if "16-frames" in model_name:
A__ : Any = 16
elif "shot" in model_name:
A__ : Any = 32
A__ : Optional[Any] = get_xclip_config(_snake_case , _snake_case )
A__ : Union[str, Any] = XCLIPModel(_snake_case )
model.eval()
if "drive" in checkpoint_url:
A__ : int = '''pytorch_model.bin'''
gdown.cached_download(_snake_case , _snake_case , quiet=_snake_case )
A__ : Tuple = torch.load(_snake_case , map_location='''cpu''' )['''model''']
else:
A__ : Optional[Any] = torch.hub.load_state_dict_from_url(_snake_case )['''model''']
A__ : Any = convert_state_dict(_snake_case , _snake_case )
A__ : Union[str, Any] = XCLIPModel(_snake_case )
A__ , A__ : Any = model.load_state_dict(_snake_case , strict=_snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
A__ : Tuple = 3_36 if model_name == '''xclip-large-patch14-16-frames''' else 2_24
A__ : Tuple = VideoMAEImageProcessor(size=_snake_case )
    slow_tokenizer = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
A__ : Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
A__ : Optional[Any] = XCLIPProcessor(image_processor=_snake_case , tokenizer=_snake_case )
A__ : List[Any] = prepare_video(_snake_case )
A__ : Optional[int] = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=_snake_case , return_tensors='''pt''' , padding=_snake_case )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
A__ : List[str] = model(**_snake_case )
# Verify outputs
A__ : Dict = outputs.logits_per_video
A__ : Union[str, Any] = logits_per_video.softmax(dim=1 )
print('''Probs:''' , _snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
A__ : Optional[Any] = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
A__ : Dict = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
A__ : List[Any] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
A__ : Dict = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
A__ : str = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
A__ : Union[str, Any] = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
A__ : List[Any] = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
A__ : Optional[int] = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
A__ : str = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
A__ : Union[str, Any] = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
A__ : List[str] = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
A__ : Dict = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
A__ : List[Any] = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
A__ : Dict = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
A__ : Union[str, Any] = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
A__ : str = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
A__ : Optional[int] = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
A__ : List[Any] = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(_snake_case , _snake_case , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(_snake_case , organization='''nielsr''' )
processor.push_to_hub(_snake_case , organization='''nielsr''' )
slow_tokenizer.push_to_hub(_snake_case , organization='''nielsr''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
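For reference, a typical invocation of this conversion script (the script filename is assumed; the flags are exactly those registered above):
# python convert_x_clip_original_pytorch_to_hf.py \
#     --model_name xclip-base-patch32 \
#     --pytorch_dump_folder_path ./xclip-base-patch32 \
#     --push_to_hub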
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
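A minimal sketch of how an abstract command base like this is typically used (all names below are hypothetical, since the class above is obfuscated):
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # attach this command's flags to the shared parser
        parser.add_argument('--name', default='world')

    def run(self):
        print('hello')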
| 55
| 0
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in the input strings,
    # increment the count for the first and decrement it for the second
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
    print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
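# e.g. with the defaults above: (30 // 2) * (30 // 2) = 225 patches,
# so expected_seq_len = 225 + 1 + 10 = 236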
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 0
|
# flake8: noqa
# Lint as: python3
_SCREAMING_SNAKE_CASE : Tuple = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 709
|
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 10_00) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
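Since fibonacci(index) rebuilds the whole sequence on every call, the search above is quadratic; a minimal incremental alternative (not part of the original file) would be:
def fibonacci_digits_index_incremental(n: int) -> int:
    # keep only the last two Fibonacci terms and count digits as we go
    a, b = 0, 1
    index = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index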
| 55
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {'''vocab_file''': '''vocab.txt'''}
_SCREAMING_SNAKE_CASE : List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
_SCREAMING_SNAKE_CASE : str = {
'''YituTech/conv-bert-base''': 5_1_2,
'''YituTech/conv-bert-medium-small''': 5_1_2,
'''YituTech/conv-bert-small''': 5_1_2,
}
_SCREAMING_SNAKE_CASE : List[Any] = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class UpperCamelCase__ ( snake_case__ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ConvBertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
A__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
A__ : List[str] = getattr(_A , normalizer_state.pop('''type''' ) )
A__ : str = do_lower_case
A__ : str = strip_accents
A__ : Optional[Any] = tokenize_chinese_chars
A__ : str = normalizer_class(**_A )
A__ : Any = do_lower_case
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : List[Any] = [self.sep_token_id]
A__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Union[str, Any] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
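A short usage sketch, assuming the class above is transformers' ConvBertTokenizerFast (its vocab files and slow-tokenizer class match that tokenizer):
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained('YituTech/conv-bert-base')
encoding = tokenizer('hello world')
print(encoding.input_ids)  # [CLS] hello world [SEP] token ids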
| 710
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
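The heavily memoized code above computes the digit-sum sequence a(1) = 1, a(k+1) = a(k) + digitsum(a(k)); a naive reference version (far too slow for n = 10**15, but handy for sanity-checking small cases, with the indexing a(1) = 1 assumed) might look like:
def solution_naive(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a

assert solution_naive(3) == 4  # 1 -> 2 -> 4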
| 55
| 0
|
def mean_absolute_deviation(nums: list) -> float:
    """simple docstring"""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''')
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
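For example, for nums = [1, 2, 3, 4] the average is 2.5 and the absolute deviations are [1.5, 0.5, 0.5, 1.5], so:
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0  # (1.5 + 0.5 + 0.5 + 1.5) / 4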
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class UpperCamelCase__ ( lowercase_ ):
'''simple docstring'''
_lowerCAmelCase = "luke"
def __init__( self , UpperCamelCase__=5_0267 , UpperCamelCase__=50_0000 , UpperCamelCase__=768 , UpperCamelCase__=256 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , **UpperCamelCase__ , ):
super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
A__ : int = vocab_size
A__ : int = entity_vocab_size
A__ : Optional[Any] = hidden_size
A__ : Union[str, Any] = entity_emb_size
A__ : Union[str, Any] = num_hidden_layers
A__ : str = num_attention_heads
A__ : Optional[int] = hidden_act
A__ : Union[str, Any] = intermediate_size
A__ : str = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Optional[int] = type_vocab_size
A__ : Any = initializer_range
A__ : Any = layer_norm_eps
A__ : str = use_entity_aware_attention
A__ : List[str] = classifier_dropout
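A short usage sketch, assuming this is transformers' LukeConfig (the "luke" model type and entity-vocabulary fields match it):
from transformers import LukeConfig

config = LukeConfig(entity_emb_size=256, use_entity_aware_attention=True)
print(config.model_type, config.entity_vocab_size)  # luke 500000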
| 712
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[Any] = np.array(SQUARE )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
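Since the class above is heavily renamed, here is a minimal self-contained sketch of the same letter-to-coordinates step, using the module-level SQUARE table directly:
import numpy as np

square = np.array([
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
])

def letter_to_numbers(letter: str) -> np.ndarray:
    # row/column of the letter in the 5x5 square, 1-based ('j' is merged into 'i')
    rows, cols = np.where(letter == square)
    return np.concatenate([rows + 1, cols + 1])

print(letter_to_numbers('k'))  # [2 5]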
| 55
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCamelCase__ ( __lowerCamelCase ):
'''simple docstring'''
@slow
@require_torch
def __snake_case ( self ):
A__ : Optional[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
A__ : int = BertTokenizer.from_pretrained('''bert-base-uncased''' )
A__ : int = bertabert.config.encoder.vocab_size
A__ : Union[str, Any] = tokenizer.sep_token_id
A__ : Tuple = tokenizer.cls_token_id
A__ : Union[str, Any] = 128
A__ : Tuple = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
A__ : Optional[Any] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
A__ : str = train_dataset.select(range(32 ) )
A__ : int = val_dataset.select(range(16 ) )
A__ : str = 4
def _map_to_encoder_decoder_inputs(UpperCamelCase__ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
A__ : Dict = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=UpperCamelCase_ , max_length=512 )
A__ : Optional[Any] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=UpperCamelCase_ , max_length=128 )
A__ : Any = inputs.input_ids
A__ : int = inputs.attention_mask
A__ : List[str] = outputs.input_ids
A__ : List[Any] = outputs.input_ids.copy()
A__ : Optional[Any] = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
A__ : int = outputs.attention_mask
assert all(len(UpperCamelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCamelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCamelCase__ ):
A__ : List[str] = pred.label_ids
A__ : Tuple = pred.predictions
# all unnecessary tokens are removed
A__ : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
A__ : int = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
A__ : Union[str, Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase_ ) )] ) / len(UpperCamelCase_ )
return {"accuracy": accuracy}
# map train dataset
A__ : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
A__ : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCamelCase_ , batch_size=UpperCamelCase_ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
A__ : str = self.get_auto_remove_tmp_dir()
A__ : Tuple = SeqaSeqTrainingArguments(
output_dir=UpperCamelCase_ , per_device_train_batch_size=UpperCamelCase_ , per_device_eval_batch_size=UpperCamelCase_ , predict_with_generate=UpperCamelCase_ , evaluation_strategy='''steps''' , do_train=UpperCamelCase_ , do_eval=UpperCamelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
A__ : Dict = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
# start training
trainer.train()
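The -100 substitution in _map_to_encoder_decoder_inputs works because -100 is the default ignore_index of PyTorch's CrossEntropyLoss, so padded label positions contribute nothing to the loss:
import torch

loss_fct = torch.nn.CrossEntropyLoss()  # ignore_index defaults to -100
logits = torch.randn(3, 5)
labels = torch.tensor([1, -100, 3])  # the middle (padding) position is ignored
print(loss_fct(logits, labels))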
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ : Any = int(lowerCamelCase__ )
assert noofclusters < len(lowerCamelCase__ )
# Find out the dimensionality
A__ : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
A__ : List[str] = list(range(len(lowerCamelCase__ ) ) )
shuffle(lowerCamelCase__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
A__ : List[str] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
A__ : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
A__ : Tuple = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowerCamelCase__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
A__ : Tuple = tf.placeholder('''float64''' , [dim] )
A__ : List[str] = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
A__ : str = [tf.Variable(0 ) for i in range(len(lowerCamelCase__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
A__ : Dict = tf.placeholder('''int32''' )
A__ : List[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
A__ : Tuple = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
A__ : Optional[Any] = tf.reduce_mean(lowerCamelCase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
A__ : Optional[int] = tf.placeholder('''float''' , [dim] )
A__ : int = tf.placeholder('''float''' , [dim] )
A__ : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCamelCase__ , lowerCamelCase__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
A__ : Optional[int] = tf.placeholder('''float''' , [noofclusters] )
A__ : List[Any] = tf.argmin(lowerCamelCase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
A__ : List[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowerCamelCase__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
A__ : Optional[int] = 1_00
for _ in range(lowerCamelCase__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowerCamelCase__ ) ):
A__ : Union[str, Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
A__ : int = [
sess.run(lowerCamelCase__ , feed_dict={va: vect, va: sess.run(lowerCamelCase__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
A__ : Union[str, Any] = sess.run(
lowerCamelCase__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowerCamelCase__ ):
# Collect all the vectors assigned to this cluster
A__ : Optional[int] = [
vectors[i]
for i in range(len(lowerCamelCase__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
A__ : Optional[int] = sess.run(
lowerCamelCase__ , feed_dict={mean_input: array(lowerCamelCase__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
A__ : str = sess.run(lowerCamelCase__ )
A__ : str = sess.run(lowerCamelCase__ )
return centroids, assignments
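Note the code above targets very old TensorFlow 1.x APIs (tf.sub and tf.initialize_all_variables have long been removed). The same Expectation-Maximization update in plain NumPy, as a minimal sketch:
import numpy as np

def kmeans_step(vectors: np.ndarray, centroids: np.ndarray) -> tuple:
    # Expectation: assign each vector to its nearest centroid
    distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
    assignments = distances.argmin(axis=1)
    # Maximization: move each centroid to the mean of its assigned vectors
    new_centroids = np.array([
        vectors[assignments == k].mean(axis=0) if np.any(assignments == k) else centroids[k]
        for k in range(len(centroids))
    ])
    return new_centroids, assignments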
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : Dict = model(**__UpperCamelCase )
A__ : Dict = outputs.loss
A__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
A__ : Optional[int] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Union[str, Any] = model(**__UpperCamelCase )
A__ : int = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
A__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Dict = parser.parse_args()
A__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
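As the "# New Code #" comments note, the manual truncation of the last evaluation batch can be replaced by Accelerator.gather_for_metrics, which drops the duplicated samples automatically:
# inside the eval loop, instead of the manual samples_seen bookkeeping:
predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(predictions=predictions, references=references)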
| 55
| 0
|