| column | dtype | values |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
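Each row pairs a `code` snippet with a `style_context` snippet; the two `*_codestyle` integers appear to be style-cluster ids, and in the rows below `label` is 1 exactly when the two ids match. A minimal sketch of inspecting such a dataset with the `datasets` library — the dataset identifier below is a placeholder, not a real path:

```python
from datasets import load_dataset

# Placeholder identifier -- substitute the actual dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"])  # style-cluster ids
print(row["label"])  # 1 when both snippets come from the same style cluster
```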
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
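A short usage sketch of the tokenizer exercised above; the expected ids come from `test_prepare_batch` (requires network access to the Hugging Face Hub):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("moussaKam/mbarthez")

batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
print(batch.input_ids.tolist()[0])  # [0, 57, 3018, 70307, 91, 2] per the test above
```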
*(code_codestyle: 200)*
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
*(style_context_codestyle: 200, label: 1)*
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
*(code_codestyle: 171)*
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: return the least maximum cuboid size M for which the number
    of cuboids with integer sides up to M, whose shortest surface path between
    opposite corners has integer length, first exceeds ``limit``.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
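`solution` relies on the fact that the shortest surface path across an a×b×c cuboid (a ≤ b ≤ c) has length sqrt((a+b)² + c²). A quick sanity check on the classic 6×5×3 cuboid, whose shortest route is exactly 10:

```python
from math import sqrt

# Cuboid 6 x 5 x 3: largest side M = 6, sum of the two shorter sides s = 5 + 3 = 8.
M, s = 6, 5 + 3
assert sqrt(s**2 + M**2) == 10.0  # integer shortest path, so this cuboid is counted
```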
*(style_context_codestyle: 171, label: 1)*
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
*(code_codestyle: 123)*
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Turn a model test file path into a dotted module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module defined in `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in `test_file` whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in `test_file` that have a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the model tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the mapping information succinct and easy to read by keeping class names only."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
*(style_context_codestyle: 137, label: 0)*
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under non-preemptive SJF."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # While processes are not completed, a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
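Because the ready queue is re-scanned whenever the CPU is free, staggered arrivals change the schedule. A small hand-checked example under the non-preemptive rules above, assuming the two functions are in scope (values derived by tracing `calculate_waitingtime`):

```python
# Process 0 arrives first and runs to completion at t=3; then the shorter of
# the two waiting jobs (process 1, burst 1) is picked before process 2.
arrival = [0, 1, 2]
burst = [3, 1, 2]
waiting = calculate_waitingtime(arrival, burst, 3)
assert waiting == [0, 2, 2]
assert calculate_turnaroundtime(burst, 3, waiting) == [3, 3, 4]
```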
*(code_codestyle: 353)*
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
*(style_context_codestyle: 236, label: 0)*
from math import pi, sqrt
def gamma(num: float) -> float:
    """Gamma function for positive integer and half-integer arguments."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
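For valid inputs the recursion should agree with the standard library. A quick cross-check, assuming the `gamma` function above is in scope:

```python
import math

# Half-integer case: gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi)
assert abs(gamma(3.5) - math.gamma(3.5)) < 1e-9
# Integer case: gamma(5) = 4! = 24, computed exactly by the recursion
assert gamma(5) == math.gamma(5) == 24.0
```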
*(code_codestyle: 63)*
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
*(style_context_codestyle: 63, label: 1)*
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    """Generate an RSA key pair: ((n, e), (n, d))."""
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
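Textbook RSA with these keys is plain modular exponentiation. A minimal round-trip sketch, assuming `generate_key` above is importable (it must run inside the package because of its relative imports); the 64-bit size keeps the demo fast but is far too small for real use:

```python
public_key, private_key = generate_key(64)
(n, e), (_, d) = public_key, private_key

message = 42  # must be smaller than n
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message
```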
*(code_codestyle: 173)*
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder  # sic: upstream module name
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
*(style_context_codestyle: 173, label: 1)*
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
*(code_codestyle: 191)*
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
*(style_context_codestyle: 191, label: 1)*
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""pixel_values"""]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = True , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : List[str] = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : int = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : Union[str, Any] = resample
UpperCAmelCase_ : int = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : Optional[Any] = do_rescale
UpperCAmelCase_ : Tuple = rescale_factor
UpperCAmelCase_ : Union[str, Any] = do_normalize
UpperCAmelCase_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : List[Any] = do_convert_rgb
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ : str = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : str = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Optional[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : Tuple = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : List[str] = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : Dict = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : List[str] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : List[str] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : Optional[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : List[Any] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
UpperCAmelCase_ : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase_ : Tuple = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
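A minimal usage sketch for a processor with this interface (note the class name above is a reconstruction of the obfuscated original; the defaults match CLIP-style preprocessing):

```python
import numpy as np
from PIL import Image

# Random RGB image; a real use case would load one from disk.
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))

processor = CLIPImageProcessor()    # defaults: resize shortest edge to 224, center-crop 224x224
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)
```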
*(code_codestyle: 23)*
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
*(style_context_codestyle: 23, label: 1)*
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """Test case checking that `accelerate launch` works with various configs."""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """Test case checking that `accelerate tpu-config` emits the right `gcloud` command."""

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all',
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all',
            output,
        )
*(code_codestyle: 59)*
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks.
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 206
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = (DEISMultistepScheduler,)
lowerCamelCase = (("""num_inference_steps""", 25),)
def lowerCAmelCase ( self : Optional[int] , **UpperCamelCase__ : Dict ) -> Any:
"""simple docstring"""
snake_case : Tuple = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**UpperCamelCase__ )
return config
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Optional[Any]=0 , **UpperCamelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
snake_case : Optional[int] = dict(self.forward_default_kwargs )
snake_case : str = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
snake_case : int = self.dummy_sample
snake_case : Optional[Any] = 0.1 * sample
snake_case : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Tuple = self.get_scheduler_config(**UpperCamelCase__ )
snake_case : int = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
snake_case : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
snake_case : Tuple = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
snake_case : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : Optional[int] = sample, sample
for t in range(UpperCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
snake_case : Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
snake_case : Optional[int] = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : Any=0 , **UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[int] = dict(self.forward_default_kwargs )
snake_case : Optional[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
snake_case : Optional[int] = self.dummy_sample
snake_case : str = 0.1 * sample
snake_case : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Optional[int] = self.get_scheduler_config()
snake_case : str = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
snake_case : Optional[Any] = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be done after setting timesteps)
snake_case : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
snake_case : str = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : str=None , **UpperCamelCase__ : Any ) -> int:
"""simple docstring"""
if scheduler is None:
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : Any = self.get_scheduler_config(**UpperCamelCase__ )
snake_case : str = scheduler_class(**UpperCamelCase__ )
snake_case : Optional[int] = self.scheduler_classes[0]
snake_case : List[Any] = self.get_scheduler_config(**UpperCamelCase__ )
snake_case : Tuple = scheduler_class(**UpperCamelCase__ )
snake_case : str = 10
snake_case : List[Any] = self.dummy_model()
snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case : Any = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : Union[str, Any] = dict(self.forward_default_kwargs )
snake_case : Optional[Any] = kwargs.pop('''num_inference_steps''' , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
snake_case : Dict = self.get_scheduler_config()
snake_case : int = scheduler_class(**UpperCamelCase__ )
snake_case : Optional[int] = self.dummy_sample
snake_case : Any = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , '''set_timesteps''' ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , '''set_timesteps''' ):
snake_case : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
snake_case : Any = scheduler.timesteps[5]
snake_case : List[Any] = scheduler.timesteps[6]
snake_case : List[str] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
snake_case : Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Optional[int] = DEISMultistepScheduler(**self.get_scheduler_config() )
snake_case : List[str] = self.full_loop(scheduler=UpperCamelCase__ )
snake_case : Optional[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
snake_case : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case : int = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
snake_case : Any = self.full_loop(scheduler=UpperCamelCase__ )
snake_case : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=UpperCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , algorithm_type='''deis''' , solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , )
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , algorithm_type=UpperCamelCase__ , )
snake_case : List[Any] = self.full_loop(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , algorithm_type=UpperCamelCase__ , )
assert not torch.isnan(UpperCamelCase__ ).any(), "Samples have nan numbers"
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
self.check_over_configs(lower_order_final=UpperCamelCase__ )
self.check_over_configs(lower_order_final=UpperCamelCase__ )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=0 )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
snake_case : Any = self.full_loop()
snake_case : str = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case : Tuple = self.full_loop(prediction_type='''v_prediction''' )
snake_case : int = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
snake_case : Tuple = self.scheduler_classes[0]
snake_case : Optional[Any] = self.get_scheduler_config(thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0 )
snake_case : Optional[int] = scheduler_class(**UpperCamelCase__ )
snake_case : Tuple = 10
snake_case : Optional[Any] = self.dummy_model()
snake_case : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case : Dict = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case : int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
assert sample.dtype == torch.float16
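# Minimal denoising-loop sketch (an assumption-laden illustration mirroring `full_loop`
# above, not part of the test suite; `model` is any callable predicting the residual):
#
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample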
| 367
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=13 , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : str=5 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : str=0.9 , UpperCamelCase__ : Any=None , ) -> Tuple:
"""simple docstring"""
snake_case : List[Any] = parent
snake_case : Tuple = batch_size
snake_case : str = image_size
snake_case : Tuple = num_channels
snake_case : List[Any] = patch_size
snake_case : Optional[Any] = tubelet_size
snake_case : Tuple = num_frames
snake_case : Optional[Any] = is_training
snake_case : Tuple = use_labels
snake_case : List[str] = hidden_size
snake_case : Any = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : int = attention_probs_dropout_prob
snake_case : Optional[Any] = type_sequence_label_size
snake_case : Optional[int] = initializer_range
snake_case : Any = mask_ratio
snake_case : Optional[int] = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
snake_case : Dict = (image_size // patch_size) ** 2
snake_case : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
snake_case : Optional[int] = int(mask_ratio * self.seq_length )
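# Worked example with the defaults above (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9): num_patches_per_frame = (10 // 2) ** 2 = 25,
# seq_length = (2 // 2) * 25 = 25, and num_masks = int(0.9 * 25) = 22.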
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
snake_case : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case : Tuple = None
if self.use_labels:
snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Dict = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
snake_case : Any = VideoMAEModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
snake_case : Any = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case : int = torch.ones((self.num_masks,) )
snake_case : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
snake_case : Tuple = mask.expand(self.batch_size , -1 ).bool()
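# the resulting bool_masked_pos has shape (batch_size, seq_length), with the first
# num_masks positions of every row set to True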
snake_case : str = model(UpperCamelCase__ , UpperCamelCase__ )
# model only returns predictions for masked patches
snake_case : Tuple = mask.sum().item()
snake_case : Dict = 3 * self.tubelet_size * self.patch_size**2
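# e.g. with the defaults tubelet_size=2 and patch_size=2, each masked patch is
# reconstructed as 3 * 2 * 2**2 = 24 pixel values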
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
snake_case : Tuple = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case : Optional[int] = config_and_inputs
snake_case : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowerCamelCase = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
snake_case : List[Any] = VideoMAEModelTester(self )
snake_case : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int=False ) -> Optional[Any]:
"""simple docstring"""
snake_case : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case : Optional[int] = torch.ones((self.model_tester.num_masks,) )
snake_case : int = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
snake_case : Dict = mask.expand(self.model_tester.batch_size , -1 ).bool()
snake_case : Optional[int] = bool_masked_pos.to(UpperCamelCase__ )
if return_labels:
if model_class in [
*get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
]:
snake_case : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
snake_case ,snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(UpperCamelCase__ )
snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : str = [*signature.parameters.keys()]
snake_case : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : int = VideoMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
if not self.has_attentions:
pass
else:
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[int] = True
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case : Dict = True
snake_case : List[str] = False
snake_case : Tuple = True
snake_case : List[str] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : List[Any] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case : Any = True
snake_case : Any = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : int = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case : Any = len(UpperCamelCase__ )
# Check attention is always last and order is fine
snake_case : Union[str, Any] = True
snake_case : Union[str, Any] = True
snake_case : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
snake_case : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] ):
snake_case : Union[str, Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
snake_case : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
snake_case : Union[str, Any] = outputs.hidden_states
snake_case : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
snake_case : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks
snake_case : int = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case ,snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : List[str] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def _UpperCamelCase ( ) -> str:
'''simple docstring'''
snake_case : int = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
snake_case : str = np.load(SCREAMING_SNAKE_CASE__ )
return list(SCREAMING_SNAKE_CASE__ )
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
snake_case : Tuple = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
UpperCamelCase__ )
snake_case : str = self.default_image_processor
snake_case : Dict = prepare_video()
snake_case : int = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : int = model(**UpperCamelCase__ )
# verify the logits
snake_case : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
snake_case : Optional[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
snake_case : List[str] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCamelCase__ )
snake_case : str = self.default_image_processor
snake_case : Tuple = prepare_video()
snake_case : List[Any] = image_processor(UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
snake_case : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
snake_case : Dict = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : Tuple = model(**UpperCamelCase__ )
# verify the logits
snake_case : str = torch.Size([1, 1408, 1536] )
snake_case : List[str] = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case : Any = torch.tensor([0.5_142] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case : str = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=UpperCamelCase__ ).to(
UpperCamelCase__ )
with torch.no_grad():
snake_case : Optional[int] = model(**UpperCamelCase__ )
snake_case : str = torch.tensor([0.6_469] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase__ , atol=1e-4 ) )
| 83
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
a_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowercase : str =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class __UpperCamelCase :
lowercase : Optional[str] =field(default=lowerCamelCase__ , metadata={'help': 'The input training data file (a text file).'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self ):
"""simple docstring"""
if self.train_file is not None:
lowerCamelCase_ =self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ =self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __UpperCamelCase :
lowercase : PreTrainedTokenizerBase
lowercase : Union[bool, str, PaddingStrategy] =True
lowercase : Optional[int] =None
lowercase : Optional[int] =None
def __call__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ='''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ =[feature.pop(lowerCAmelCase ) for feature in features]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =len(features[0]['''input_ids'''] )
lowerCamelCase_ =[
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase )] for feature in features
]
lowerCamelCase_ =list(chain(*lowerCAmelCase ) )
lowerCamelCase_ =self.tokenizer.pad(
lowerCAmelCase, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''', )
# Un-flatten
lowerCamelCase_ ={k: v.view(lowerCAmelCase, lowerCAmelCase, -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ =torch.tensor(lowerCAmelCase, dtype=torch.int64 )
return batch
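# Shape sketch: for a batch of 2 features with 4 choices each, padded to length L, the
# collator returns input_ids/attention_mask of shape (2, 4, L) and labels of shape (2,).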
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ =training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowerCamelCase_ =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ ={}
if data_args.train_file is not None:
lowerCamelCase_ =data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ =data_args.validation_file
lowerCamelCase_ =data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ =load_dataset(
__snake_case , data_files=__snake_case , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ =load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ =[F'''ending{i}''' for i in range(4 )]
lowerCamelCase_ ='''sent1'''
lowerCamelCase_ ='''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ =tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ =1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
lowerCamelCase_ =min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__snake_case : Optional[int] ):
lowerCamelCase_ =[[context] * 4 for context in examples[context_name]]
lowerCamelCase_ =examples[question_header_name]
lowerCamelCase_ =[
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__snake_case )
]
# Flatten out
lowerCamelCase_ =list(chain(*__snake_case ) )
lowerCamelCase_ =list(chain(*__snake_case ) )
# Tokenize
lowerCamelCase_ =tokenizer(
__snake_case , __snake_case , truncation=__snake_case , max_length=__snake_case , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
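# (each run of 4 consecutive tokenized sequences is regrouped, so entry i of every
# feature once again holds the 4 candidate endings of example i)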
return {k: [v[i : i + 4] for i in range(0 , len(__snake_case ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ =raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ =min(len(__snake_case ) , data_args.max_train_samples )
lowerCamelCase_ =train_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ =train_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ =raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ =min(len(__snake_case ) , data_args.max_eval_samples )
lowerCamelCase_ =eval_dataset.select(range(__snake_case ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ =eval_dataset.map(
__snake_case , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ =(
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__snake_case , pad_to_multiple_of=8 if training_args.fp16 else None )
)
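# (pad_to_multiple_of=8 keeps padded lengths tensor-core friendly when training in fp16)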
# Metric
def compute_metrics(__snake_case : Tuple ):
lowerCamelCase_, lowerCamelCase_ =eval_predictions
lowerCamelCase_ =np.argmax(__snake_case , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ =Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
# Training
if training_args.do_train:
lowerCamelCase_ =None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ =last_checkpoint
lowerCamelCase_ =trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ =train_result.metrics
lowerCamelCase_ =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
lowerCamelCase_ =min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ =trainer.evaluate()
lowerCamelCase_ =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
lowerCamelCase_ =min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
lowerCamelCase_ ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def a_ ( __snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 75
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9
| 0
|
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
if hor == 128:
snake_case_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
snake_case_ = (32, 128, 256)
snake_case_ = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
snake_case_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
snake_case_ = (32, 64, 128, 256)
snake_case_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
snake_case_ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
snake_case_ = model.state_dict()
snake_case_ = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
snake_case_ = UNetaDModel(**UpperCamelCase__ )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
snake_case_ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
snake_case_ = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
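# NOTE (assumption): the zip-based key mapping above relies on both state dicts yielding
# their parameters in the same architectural order; a mismatched layout would be paired
# incorrectly without raising an error.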
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
snake_case_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
snake_case_ = model
snake_case_ = UNetaDModel(**UpperCamelCase__ )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
snake_case_ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
snake_case_ = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 354
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 200
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase : List[Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__lowerCAmelCase : Tuple = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__lowerCAmelCase : str = {'unk_token': '<unk>'}
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[int] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowerCAmelCase : str = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
__lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : List[str] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowerCAmelCase : int = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : List[Any] = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.prepare_image_inputs()
__lowerCAmelCase : str = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : List[Any] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.get_image_processor()
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : str = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = 'lower newer'
__lowerCAmelCase : Optional[Any] = processor(text=_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : int = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
__lowerCAmelCase : Any = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = 'lower newer'
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : str = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'google/owlvit-base-patch32'
__lowerCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = ['cat', 'nasa badge']
__lowerCAmelCase : int = processor(text=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'google/owlvit-base-patch32'
__lowerCAmelCase : List[Any] = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [['cat', 'nasa badge'], ['person']]
__lowerCAmelCase : Union[str, Any] = processor(text=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = 16
__lowerCAmelCase : str = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = max([len(_SCREAMING_SNAKE_CASE ) for texts in input_texts] )
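# the processor pads every example's query list up to the longest one, so the flattened
# batch has batch_size * num_max_text_queries rows (here 2 * 2 = 4) of length seq_length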
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
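# Usage sketch (illustrative, not part of the test suite above): one processor call
# produces both the tokenized queries and the pixel tensors. Requires network access
# to fetch the checkpoint; the zero image is a hypothetical stand-in for a real photo.
if __name__ == "__main__":
    import numpy as np
    from transformers import OwlViTProcessor

    demo_processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    demo_image = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder image, (H, W, C)
    demo_inputs = demo_processor(text=[["cat", "nasa badge"]], images=demo_image, return_tensors="np")
    print(sorted(demo_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']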
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor and point, box and
    label preprocessing into a single processor.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward pass but are needed for normalization
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        r"""
        The method pads the 2D points and labels to the maximum number of points in the batch.
        """
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """
        Expects a numpy array of length 2 in the final dimension. Requires the original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        r"""
        Checks and preprocesses the 2D points; they get converted to numpy arrays.
        """
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
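# Usage sketch (illustrative; "facebook/sam-vit-base" is one public SAM checkpoint and
# the coordinates are made up). Input points are nested per image and are rescaled to
# the image processor's longest-edge target size:
#
#   from transformers import SamProcessor
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   outputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
#   # outputs now holds 'pixel_values', 'original_sizes' and normalized 'input_points'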
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary value to its octal equivalent by grouping bits in threes.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
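# Worked example of the grouping step above: "10110" is left-padded to "010110",
# split into ["010", "110"], and each 3-bit group maps to one octal digit
# (0b010 -> 2, 0b110 -> 6), so 22 decimal comes out as "26" octal.
assert bin_to_octal("10110") == "26"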
if __name__ == "__main__":
from doctest import testmod
testmod()
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
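# Usage sketch (illustrative): the metric is normally loaded through the datasets
# registry rather than instantiated directly; the numbers reproduce Example 1 from
# the docstring above.
#
#   import datasets
#   matthews_metric = datasets.load_metric("matthews_correlation")
#   results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
#   round(results["matthews_correlation"], 2)  # 0.54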
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step + 1, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
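# Usage sketch (illustrative): `full_loop` above mirrors how the scheduler is driven
# at inference time, with a denoising model supplying the residual at each timestep:
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # hypothetical denoising model
#       sample = scheduler.step(residual, t, sample).prev_sample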
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default root is an assumption so the one-argument call below keeps
    # working; the checkpoint is cached in the given directory.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # deserialize the downloaded bytes in memory
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__UpperCAmelCase = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
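# Example invocation (illustrative; the script filename and output directory are arbitrary):
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny-en
#
# Passing one of the model names from the _MODELS table (e.g. "tiny.en") downloads
# the original checkpoint first; passing a local ".pt" path converts it directly.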
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """
    >>> moles_to_pressure(0.82, 3, 300)
    90
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
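# Worked example of the ideal-gas rearrangements above (R = 0.0821 L*atm/(mol*K)):
# T = PV / (nR), so 1 mole at 0.82 atm in 2 L sits at 1.64 / 0.0821 ≈ 19.98 K,
# which the helper rounds to 20.
assert pressure_and_volume_to_temperature(0.82, 1, 2) == 20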
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.

    Input:
    ------
        params: `NameSpace` parameters
        data: `List[np.array[int]]`
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)
    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
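# Usage sketch (illustrative): the dataset plugs into a DataLoader with its own
# collate method, so each batch comes back already padded to the batch maximum:
#
#   from torch.utils.data import DataLoader
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)  # hypothetical inputs
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)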
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Top-down (memoised) edit distance between two words.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflowing - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflowing - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
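# Quick sanity check of the memoised recursion above: "kitten" -> "sitting" takes
# 3 edits (substitute k->s, substitute e->i, insert g), the classic textbook example.
assert min_distance_up_bottom("kitten", "sitting") == 3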
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
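# Worked example of one reverse-and-add chain: 349 + 943 = 1292, 1292 + 2921 = 4213,
# 4213 + 3124 = 7337, a palindrome, so 349 converges and is not a Lychrel candidate.
assert sum_reverse(349) == 1292 and is_palindrome(7337)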
if __name__ == "__main__":
print(f"""{solution() = }""")
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
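# Usage sketch (illustrative): the pipeline is normally built through the factory
# rather than instantiated directly, e.g.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cats.png", candidate_labels=["two cats", "a plane", "a remote"])
#
# which returns a list of {"score": ..., "label": ...} dicts sorted by score.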
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_spark():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' ,[2, -1])
def lowerCAmelCase__ ( lowerCamelCase_ : List[Any]):
'''simple docstring'''
lowerCAmelCase__ : List[str] = [1, 2]
lowerCAmelCase__ : Tuple = {'''a''': 1, '''b''': 2}
lowerCAmelCase__ : Union[str, Any] = {'''a''': [1, 2], '''b''': [3, 4]}
lowerCAmelCase__ : Any = {'''a''': {'''1''': 1}, '''b''': 2}
lowerCAmelCase__ : Union[str, Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowerCAmelCase__ : int = [2, 3]
lowerCAmelCase__ : List[str] = {'''a''': 2, '''b''': 3}
lowerCAmelCase__ : str = {'''a''': [2, 3], '''b''': [4, 5]}
lowerCAmelCase__ : List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
lowerCAmelCase__ : Optional[int] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark'''):
assert map_nested(lowerCamelCase_ ,lowerCamelCase_ ,num_proc=lowerCamelCase_) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ ,lowerCamelCase_ ,num_proc=lowerCamelCase_) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ ,lowerCamelCase_ ,num_proc=lowerCamelCase_) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ ,lowerCamelCase_ ,num_proc=lowerCamelCase_) == expected_map_nested_sa
assert map_nested(lowerCamelCase_ ,lowerCamelCase_ ,num_proc=lowerCamelCase_) == expected_map_nested_sa
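
# A minimal sketch of what `map_nested` does outside the test harness; the
# values are illustrative toy data:
#
#     from datasets.utils.py_utils import map_nested
#
#     map_nested(lambda x: x * 10, {"a": [1, 2], "b": 3})
#     # -> {"a": [10, 20], "b": 30}  (the nesting structure is preserved)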
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Tuple = CLIPTokenizer
__magic_name__ :Union[str, Any] = CLIPTokenizerFast
__magic_name__ :int = True
__magic_name__ :Tuple = {}
__magic_name__ :List[Any] = False
def snake_case ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowerCAmelCase__ :List[str] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowerCAmelCase__ :Dict = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ :Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
lowerCAmelCase__ :Tuple = {'unk_token': '<unk>'}
lowerCAmelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCAmelCase ) )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 'lower newer'
lowerCAmelCase__ :Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase__ :int = 'lower newer'
lowerCAmelCase__ :str = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
lowerCAmelCase__ :List[str] = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase__ :List[str] = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
@require_ftfy
def snake_case ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ :Any = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Tuple = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
lowerCAmelCase__ :Any = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase__ :Union[str, Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
lowerCAmelCase__ :Union[str, Any] = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase__ :int = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase__ :int = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase__ :Dict = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase__ :Union[str, Any] = tokenizer_s.tokenize(__UpperCAmelCase )
lowerCAmelCase__ :int = tokenizer_r.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ :Optional[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase__ :Union[str, Any] = F"{text_of_1_token} {text_of_1_token}"
lowerCAmelCase__ :List[Any] = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , )
lowerCAmelCase__ :Optional[Any] = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
lowerCAmelCase__ :Optional[Any] = F" {text}"
lowerCAmelCase__ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , use_fast=__UpperCAmelCase , )
lowerCAmelCase__ :int = tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
def snake_case ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
'''simple docstring'''
pass
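
# Illustrative trace of the toy BPE defined in setUp, matching the expectation
# in test_full_tokenizer above:
#
#     tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
#     tokenizer.tokenize("lower newer")
#     # -> ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
#
# "lower" splits into characters, the merges "l o" -> "lo" and "e r</w>" ->
# "er</w>" apply, and "w" is left unmerged; "newer" only gets the "er</w>" merge.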
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
lowerCAmelCase__ :int = credit_card_number
lowerCAmelCase__ :Tuple = 0
lowerCAmelCase__ :int = len(_SCREAMING_SNAKE_CASE ) - 2
for i in range(_SCREAMING_SNAKE_CASE , -1 , -2 ):
# double the value of every second digit
lowerCAmelCase__ :Optional[Any] = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCAmelCase__ :str = cc_number[:i] + str(_SCREAMING_SNAKE_CASE ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_SCREAMING_SNAKE_CASE ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_SCREAMING_SNAKE_CASE ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_SCREAMING_SNAKE_CASE ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
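
# Usage sketch for the metrics above; the arrays are made-up toy data:
#
#     import numpy as np
#
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}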
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
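
# The @slow integration tests above only run when slow tests are enabled; a
# typical invocation (the test file path is illustrative):
#
#     RUN_SLOW=1 python -m pytest tests/models/mra/test_modeling_mra.py -k Integration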
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
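
# Example invocation of the script above (the script filename and output path
# are illustrative assumptions):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b-converted \
#         --push_to_hub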
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}

DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]

DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    # Plain text file with an arbitrary, unsupported extension.
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
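
# A minimal sketch of how one of these fixtures would be consumed in a test;
# the test body is illustrative, not part of the original file:
#
#     def test_csv_roundtrip(csv_path):
#         ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#         assert ds.column_names == ["col_1", "col_2", "col_3"]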
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Dict = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    # Class, argument, and attribute names are restored from the published
    # MobileNetV2 configuration; the anonymized originals were unrecoverable.
    model_type = "mobilenet_v2"
    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8,
                 expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True,
                 hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02,
                 layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
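# Minimal usage sketch (illustrative, not part of the original file):
#   config = MobileNetV2Config(depth_multiplier=1.4)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   onnx_config.inputs   -> OrderedDict([("pixel_values", {0: "batch"})])
#   onnx_config.outputs  -> last_hidden_state / pooler_output axes for the default task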
| 22
|
def pancake_sort(arr):
    """Sort a list using pancake sort: flip the largest remaining element to the
    front, then flip it into its final position."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix so the maximum lands at index cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
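# Illustrative trace (not part of the original script) for pancake_sort([3, 1, 2]):
#   cur=3: max at mi=0 -> flip prefix [3] (no-op) -> flip first 3 elements: [2, 1, 3]
#   cur=2: max at mi=0 -> flip prefix [2] (no-op) -> flip first 2 elements: [1, 2, 3]
# Each pass therefore needs at most two reversals to place one element.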
| 95
| 0
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operators.
    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        c = first & second  # positions that generate a carry
        first ^= second     # bitwise sum without the carry
        second = c << 1     # carry, shifted into the next column
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
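# Illustrative trace (not in the original) showing why the loop terminates, add(5, 3):
#   c = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   c = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   c = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   c = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8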
| 316
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
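# What the lazy pattern above buys (illustrative note): only the config import is
# eager; `EncoderDecoderModel` and friends are resolved on first attribute access,
# so importing the package never pulls in torch, TensorFlow, or Flax up front.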
| 316
| 1
|
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) of a non-negative integer, Brian Kernighan style.
    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # number & (number - 1) clears the lowest set bit, so the loop runs once
        # per 1-bit instead of once per bit position
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
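# For reference: on Python >= 3.10 the standard library gives the same result
# directly, e.g. (25).bit_count() == get_set_bits_count(25) == 3.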
| 263
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
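# Illustrative check (assumed, not in the original file): the deprecation can be
# observed with the standard warnings machinery.
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       DonutFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)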
| 263
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    # Method and variable names are restored to the conventional unittest layout;
    # the anonymized originals were not recoverable from this snippet alone.
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
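# Usage sketch outside the test suite (illustrative; the checkpoint and preset
# names are examples, not taken from this file):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("This is a test string", voice_preset="v2/en_speaker_6")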
| 371
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length
    def __len__(self):
        return self.length
    def __getitem__(self, i) -> int:
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)
    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    # Class and variable names are restored best-effort from how they are used.
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
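    # Example invocation on a 2-GPU machine (illustrative; mirrors the header comment):
    #   torchrun --nproc_per_node=2 ./tests/test_trainer_distributed.py --output_dir /tmp/ttd_out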
| 288
| 0
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to Flax names and reshape the tensor if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Function name restored from the matching diffusers utility (an assumption);
    # all other names are grounded by how they are referenced below.
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
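# Usage sketch (illustrative; the checkpoint path and model are placeholders):
#   pt_state_dict = torch.load("checkpoint.bin", map_location="cpu")
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)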
| 19
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    # Attribute names are restored from the YolosConfig API; the anonymized
    # assignment targets were lost in this dump.
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    # The anonymized target keys were lost; the ones below follow the standard
    # ViT query/key/value layout used by YOLOS conversions (an assumption).
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict, model):
    # The qkv target keys below follow the standard ViT layout (an assumption);
    # the anonymized originals were lost.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
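    # Example invocation (illustrative; the script filename is assumed):
    #   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
    #       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small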
| 19
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    # Method and parameter names are restored to the standard slow-tokenizer API;
    # the anonymized originals were not recoverable from this snippet alone.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]",
                 eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
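# Usage sketch (illustrative; the vocab file path is a placeholder):
#   tok = RemBertTokenizer("sentencepiece.model")
#   ids = tok.convert_tokens_to_ids(tok._tokenize("Hello world"))
#   ids_with_specials = tok.build_inputs_with_special_tokens(ids)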
| 188
|
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    # Names are restored to the conventional ModelTester layout; the anonymized
    # originals were not recoverable from this snippet alone.
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
                 q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 188
| 1
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"
# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Map TF checkpoint variable names to the PyTorch tensors they populate
    (the TF-side key strings below follow the original slim checkpoint layout)."""
    tf_to_pt_map = {}
    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to the input of a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
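# Worked example of the SAME-padding arithmetic above (illustrative): with
# in_height = 7, stride = 2, kernel = 3, we get
# pad_along_height = max(3 - (7 % 2), 0) = 2, i.e. pad_top = 1 and pad_bottom = 1,
# matching what TensorFlow's padding="SAME" produces for that layer.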
class MobileNetV1ConvLayer(nn.Module):
    # Class and parameter names are restored from the published MobileNetV1 port;
    # the anonymized originals were not recoverable from this snippet alone.
    def __init__(self, config: MobileNetV1Config, in_channels: int, out_channels: int, kernel_size: int,
                 stride: int = 1, groups: int = 1, bias: bool = False, use_normalization: bool = True,
                 use_activation=True) -> None:
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
            padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetV1ConvLayer(config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2)
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            # depthwise convolution, then pointwise 1x1 convolution
            self.layer.append(MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels))
            self.layer.append(MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None,
                return_dict: Optional[bool] = None) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)
        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None,
                labels: Optional[torch.Tensor] = None, return_dict: Optional[bool] = None) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 347
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 347
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowerCAmelCase ):
__lowerCAmelCase : List[str] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowerCAmelCase ):
__lowerCAmelCase : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowerCAmelCase ):
__lowerCAmelCase : str = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowerCAmelCase ):
__lowerCAmelCase : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowerCAmelCase ):
__lowerCAmelCase : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=__lowerCAmelCase ):
__lowerCAmelCase : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
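# Hedged sketch of the guard these dummy classes rely on (a simplified assumption, not the
# real implementation of `requires_backends`): it checks each named backend's availability
# and raises an ImportError listing the missing ones, roughly like
#
#   def requires_backends(obj, backends):
#       missing = [b for b in backends if importlib.util.find_spec(b) is None]
#       if missing:
#           raise ImportError(f"{type(obj).__name__} requires the following backends: {missing}")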
| 364
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase : Tuple = value_function
UpperCAmelCase : Dict = unet
UpperCAmelCase : Union[str, Any] = scheduler
UpperCAmelCase : List[Any] = env
UpperCAmelCase : int = env.get_dataset()
UpperCAmelCase : Optional[int] = {}
for key in self.data.keys():
try:
UpperCAmelCase : Dict = self.data[key].mean()
except: # noqa: E722
pass
UpperCAmelCase : int = {}
for key in self.data.keys():
try:
UpperCAmelCase : Optional[Any] = self.data[key].std()
except: # noqa: E722
pass
UpperCAmelCase : Any = env.observation_space.shape[0]
UpperCAmelCase : str = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if type(_SCREAMING_SNAKE_CASE ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
elif torch.is_tensor(_SCREAMING_SNAKE_CASE ):
return x_in.to(self.unet.device )
return torch.tensor(_SCREAMING_SNAKE_CASE , device=self.unet.device )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
for key, val in cond.items():
UpperCAmelCase : Optional[Any] = val.clone()
return x_in
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict = x.shape[0]
UpperCAmelCase : Optional[int] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCAmelCase : Tuple = torch.full((batch_size,) , _SCREAMING_SNAKE_CASE , device=self.unet.device , dtype=torch.long )
for _ in range(_SCREAMING_SNAKE_CASE ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCAmelCase : Dict = self.value_function(x.permute(0 , 2 , 1 ) , _SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase : Optional[int] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCAmelCase : List[Any] = self.scheduler._get_variance(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = torch.exp(0.5 * posterior_variance )
UpperCAmelCase : str = model_std * grad
UpperCAmelCase : str = 0
UpperCAmelCase : Any = x.detach()
UpperCAmelCase : int = x + scale * grad
UpperCAmelCase : Any = self.reset_xa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.action_dim )
UpperCAmelCase : Optional[int] = self.unet(x.permute(0 , 2 , 1 ) , _SCREAMING_SNAKE_CASE ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCAmelCase : Any = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , predict_epsilon=_SCREAMING_SNAKE_CASE )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCAmelCase : Dict = self.reset_xa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.action_dim )
UpperCAmelCase : int = self.to_torch(_SCREAMING_SNAKE_CASE )
return x, y
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = self.normalize(_SCREAMING_SNAKE_CASE , """observations""" )
UpperCAmelCase : int = obs[None].repeat(_SCREAMING_SNAKE_CASE , axis=0 )
UpperCAmelCase : Dict = {0: self.to_torch(_SCREAMING_SNAKE_CASE )}
UpperCAmelCase : Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCAmelCase : str = randn_tensor(_SCREAMING_SNAKE_CASE , device=self.unet.device )
UpperCAmelCase : Any = self.reset_xa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.action_dim )
UpperCAmelCase : str = self.to_torch(_SCREAMING_SNAKE_CASE )
# run the diffusion process
UpperCAmelCase , UpperCAmelCase : Any = self.run_diffusion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# sort output trajectories by value
UpperCAmelCase : List[str] = y.argsort(0 , descending=_SCREAMING_SNAKE_CASE ).squeeze()
UpperCAmelCase : Any = x[sorted_idx]
UpperCAmelCase : Dict = sorted_values[:, :, : self.action_dim]
UpperCAmelCase : int = actions.detach().cpu().numpy()
UpperCAmelCase : List[str] = self.de_normalize(_SCREAMING_SNAKE_CASE , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCAmelCase : Any = 0
else:
# if we didn't run value guiding, select a random action
UpperCAmelCase : Optional[int] = np.random.randint(0 , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = denorm_actions[selected_index, 0]
return denorm_actions
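# Hedged usage sketch (the upstream class is named ValueGuidedRLPipeline; a d4rl-style `env`
# plus pretrained value_function / unet / scheduler are assumed, and the positional arguments
# follow the defaults of __call__ above):
#
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   denorm_actions = pipeline(obs, 64, 32, 2, 0.1)   # batch size, horizon, guide steps, scale
#   obs, reward, done, info = env.step(denorm_actions)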
| 76
| 0
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
_lowerCAmelCase = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
_lowerCAmelCase = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ : Optional[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCAmelCase__ : Optional[int] = numpy_to_pil(UpperCamelCase )
return images
def numpy_to_pil( UpperCamelCase ):
"""simple docstring"""
if images.ndim == 3:
lowerCAmelCase__ : List[str] = images[None, ...]
lowerCAmelCase__ : int = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCAmelCase__ : int = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
lowerCAmelCase__ : Tuple = [Image.fromarray(UpperCamelCase ) for image in images]
return pil_images
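# Hedged usage sketch for the two helpers above (illustrative; assumes torch and NCHW tensors
# in [-1, 1], as diffusion models produce):
#
#   import torch
#   fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1      # values in [-1, 1]
#   pil_images = _SCREAMING_SNAKE_CASE(fake_batch)     # the tensor-to-PIL helper defined first above
#   assert pil_images[0].size == (64, 64)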
| 37
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 33
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''gptsan-japanese'''
lowerCAmelCase = [
'''past_key_values''',
]
lowerCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=36_000 ,_SCREAMING_SNAKE_CASE=1_280 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=8_192 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE="float32" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=0.0_02 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=35_998 ,_SCREAMING_SNAKE_CASE=35_995 ,_SCREAMING_SNAKE_CASE=35_999 ,**_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : int = d_model
UpperCAmelCase_ : List[str] = d_ff
UpperCAmelCase_ : List[Any] = d_ext
UpperCAmelCase_ : Any = d_spout
UpperCAmelCase_ : Union[str, Any] = num_switch_layers
UpperCAmelCase_ : int = num_ext_layers
UpperCAmelCase_ : List[Any] = num_switch_layers + num_ext_layers
UpperCAmelCase_ : Any = num_heads
UpperCAmelCase_ : str = num_experts
UpperCAmelCase_ : Tuple = expert_capacity
UpperCAmelCase_ : List[str] = dropout_rate
UpperCAmelCase_ : Union[str, Any] = layer_norm_epsilon
UpperCAmelCase_ : Any = router_bias
UpperCAmelCase_ : Union[str, Any] = router_jitter_noise
UpperCAmelCase_ : Any = router_dtype
UpperCAmelCase_ : List[Any] = router_ignore_padding_tokens
UpperCAmelCase_ : Optional[Any] = output_hidden_states
UpperCAmelCase_ : int = output_attentions
UpperCAmelCase_ : Dict = initializer_factor
UpperCAmelCase_ : str = output_router_logits
UpperCAmelCase_ : int = use_cache
super().__init__(
separator_token_id=_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
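# Hedged usage sketch (defaults are the __init__ values above; overrides are illustrative):
#
#   config = __a()                                    # vocab_size=36_000, d_model=1_024, ...
#   small_config = __a(d_model=512, num_switch_layers=5)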
| 235
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
def make_batched( videos ):
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
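# Worked examples of the batching rules above (shapes only; frames are assumed valid images):
#   a single image   -> [[image]]           # one video with one frame
#   a list of frames -> [frames]            # one video
#   a list of videos -> returned unchanged  # already batched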
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = ['''pixel_values''']
def __init__( self ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = 1 / 255 ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> None:
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ : List[str] = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
UpperCAmelCase_ : str = do_resize
UpperCAmelCase_ : Union[str, Any] = size
UpperCAmelCase_ : int = do_center_crop
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Optional[int] = resample
UpperCAmelCase_ : List[Any] = do_rescale
UpperCAmelCase_ : Tuple = rescale_factor
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" in size:
UpperCAmelCase_ : Dict = get_resize_output_image_size(_SCREAMING_SNAKE_CASE ,size['''shortest_edge'''] ,default_to_square=_SCREAMING_SNAKE_CASE )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Tuple = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
UpperCAmelCase_ : str = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> Dict:
return rescale(_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> np.ndarray:
return normalize(_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,) -> np.ndarray:
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Any = to_numpy_array(_SCREAMING_SNAKE_CASE )
if do_resize:
UpperCAmelCase_ : Union[str, Any] = self.resize(image=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE )
if do_center_crop:
UpperCAmelCase_ : Optional[int] = self.center_crop(_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE )
if do_rescale:
UpperCAmelCase_ : str = self.rescale(image=_SCREAMING_SNAKE_CASE ,scale=_SCREAMING_SNAKE_CASE )
if do_normalize:
UpperCAmelCase_ : List[Any] = self.normalize(image=_SCREAMING_SNAKE_CASE ,mean=_SCREAMING_SNAKE_CASE ,std=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = to_channel_dimension_format(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return image
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**_SCREAMING_SNAKE_CASE ,) -> PIL.Image.Image:
UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : int = resample if resample is not None else self.resample
UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : List[str] = size if size is not None else self.size
UpperCAmelCase_ : Optional[int] = get_size_dict(_SCREAMING_SNAKE_CASE ,default_to_square=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Any = get_size_dict(_SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase_ : List[Any] = make_batched(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = [
[
self._preprocess_image(
image=_SCREAMING_SNAKE_CASE ,do_resize=_SCREAMING_SNAKE_CASE ,size=_SCREAMING_SNAKE_CASE ,resample=_SCREAMING_SNAKE_CASE ,do_center_crop=_SCREAMING_SNAKE_CASE ,crop_size=_SCREAMING_SNAKE_CASE ,do_rescale=_SCREAMING_SNAKE_CASE ,rescale_factor=_SCREAMING_SNAKE_CASE ,do_normalize=_SCREAMING_SNAKE_CASE ,image_mean=_SCREAMING_SNAKE_CASE ,image_std=_SCREAMING_SNAKE_CASE ,data_format=_SCREAMING_SNAKE_CASE ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : Any = {'''pixel_values''': videos}
return BatchFeature(data=_SCREAMING_SNAKE_CASE ,tensor_type=_SCREAMING_SNAKE_CASE )
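# Hedged usage sketch (assumes PIL is available; numbers follow the defaults above):
#
#   processor = __a()
#   video = [PIL.Image.new("RGB", (320, 240)) for _ in range(8)]
#   batch = processor(video, return_tensors="pt")
#   # batch["pixel_values"].shape -> (1, 8, 3, 224, 224) after the default resize + 224x224 crop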
| 235
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
SCREAMING_SNAKE_CASE : Union[str, Any] = """\
"""
SCREAMING_SNAKE_CASE : Any = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
SCREAMING_SNAKE_CASE : Dict = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
    input_texts (list of str): input texts to score; each separate text snippet
        is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ = 16 , a_ = True , a_=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__snake_case : Optional[Any] = '''cuda'''
else:
__snake_case : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__snake_case : int = AutoModelForCausalLM.from_pretrained(a_ )
__snake_case : Optional[int] = model.to(a_ )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(a_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case : List[Any] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(a_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case : List[Any] = model.config.max_length - 1
else:
__snake_case : Dict = model.config.max_length
__snake_case : Tuple = tokenizer(
a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , return_tensors='''pt''' , return_attention_mask=a_ , ).to(a_ )
__snake_case : List[Any] = encodings['''input_ids''']
__snake_case : str = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case : Union[str, Any] = []
__snake_case : str = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(a_ ) , a_ ) ):
__snake_case : Optional[int] = min(start_index + batch_size , len(a_ ) )
__snake_case : int = encoded_texts[start_index:end_index]
__snake_case : Optional[int] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case : List[Any] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(a_ )
__snake_case : Union[str, Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case : int = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(a_ ), attn_mask] , dim=1 )
__snake_case : List[Any] = encoded_batch
with torch.no_grad():
__snake_case : List[str] = model(a_ , attention_mask=a_ ).logits
__snake_case : List[str] = out_logits[..., :-1, :].contiguous()
__snake_case : int = labels[..., 1:].contiguous()
__snake_case : int = attn_mask[..., 1:].contiguous()
__snake_case : Tuple = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , a_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a_ )}
| 102
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
    if not isinstance(__lowercase , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_string: str = str(abs(__lowercase ) )
        num_transpositions: list = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
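# Worked example of the function above: for 123 the single-digit deletions are
# "23", "13" and "12", so the maximum value returned is 23.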
| 319
| 0
|
import re
def UpperCAmelCase ( a_ ) -> bool:
"""simple docstring"""
__A = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(__A , a_ ) )
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE :Tuple = '0094702343221'
    print(UpperCAmelCase(SCREAMING_SNAKE_CASE))
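# Hedged examples against the pattern above (hand-checked, not exhaustive):
#   "0094702343221" -> True   # 0094 prefix, leading 7, valid second digit, 7 trailing digits
#   "+94773283048"  -> True   # +94 prefix
#   "0312345678"    -> False  # the digit right after the prefix must be 7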
| 124
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] ,A : List[str] ,A : List[Any]=7 ,A : Any=3 ,A : int=30 ,A : List[Any]=4_00 ,A : str=True ,A : int=None ,A : List[str]=0.9 ,A : Dict=None ,A : int=True ,A : Any=[0.5, 0.5, 0.5] ,A : Optional[int]=[0.5, 0.5, 0.5] ,):
__A = size if size is not None else {"shortest_edge": 30}
__A = crop_size if crop_size is not None else {"height": 30, "width": 30}
__A = parent
__A = batch_size
__A = num_channels
__A = min_resolution
__A = max_resolution
__A = do_resize_and_center_crop
__A = size
__A = crop_pct
__A = crop_size
__A = do_normalize
__A = image_mean
__A = image_std
def UpperCamelCase_ ( self : int ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[Any] ):
__A = PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Tuple ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize_and_center_crop" ) )
self.assertTrue(hasattr(A ,"size" ) )
self.assertTrue(hasattr(A ,"crop_pct" ) )
self.assertTrue(hasattr(A ,"do_normalize" ) )
self.assertTrue(hasattr(A ,"image_mean" ) )
self.assertTrue(hasattr(A ,"image_std" ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size ,{"height": 30, "width": 30} )
__A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def UpperCamelCase_ ( self : List[str] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 124
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''DeiTFeatureExtractor''']
a_ = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE : Union[str, Any] = 'CLIPImageProcessor'
SCREAMING_SNAKE_CASE : Union[str, Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] ,lowercase__ : Dict=None ,lowercase__ : Union[str, Any]=None ,**lowercase__ : Tuple ):
__lowercase = None
if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' ,FutureWarning ,)
__lowercase = kwargs.pop('''feature_extractor''' )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ ,lowercase__ )
def __call__( self : List[Any] ,lowercase__ : str=None ,lowercase__ : List[Any]=None ,lowercase__ : Optional[Any]=None ,**lowercase__ : int ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if images is not None:
__lowercase = self.image_processor(lowercase__ ,return_tensors=lowercase__ ,**lowercase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) ,tensor_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,*lowercase__ : List[str] ,**lowercase__ : int ):
return self.tokenizer.batch_decode(*lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,*lowercase__ : Optional[int] ,**lowercase__ : Union[str, Any] ):
return self.tokenizer.decode(*lowercase__ ,**lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer.model_input_names
__lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,FutureWarning ,)
        return self.image_processor_class
    @property
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,FutureWarning ,)
        return self.image_processor
| 104
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
lowerCamelCase_ : Optional[int] = field(
default=128, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowerCamelCase_ : bool = field(
default=_a, metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
}, )
lowerCamelCase_ : Optional[int] = field(
default=_a, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
lowerCamelCase_ : Optional[int] = field(
default=_a, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
lowerCamelCase_ : Optional[int] = field(
default=_a, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
}, )
@dataclass
class __lowerCAmelCase :
lowerCamelCase_ : str = field(
default=_a, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase_ : str = field(
default=_a, metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase_ : Optional[str] = field(
default=_a, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
lowerCamelCase_ : Optional[bool] = field(
default=_a, metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
lowerCamelCase_ : str = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
lowerCamelCase_ : bool = field(
default=_a, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, )
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ : List[Any] = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
datasets.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
snake_case_ : Union[str, Any] = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
snake_case_ : str = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Optional[int] = train_dataset.features['''label'''].names
if training_args.do_eval:
snake_case_ : Dict = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Tuple = eval_dataset.features['''label'''].names
if training_args.do_predict:
snake_case_ : int = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Optional[int] = predict_dataset.features['''label'''].names
# Labels
snake_case_ : int = len(_UpperCamelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ : Any = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCamelCase , idalabel={str(_UpperCamelCase ): label for i, label in enumerate(_UpperCamelCase )} , labelaid={label: i for i, label in enumerate(_UpperCamelCase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
snake_case_ : Dict = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case_ : str = False
def preprocess_function(_UpperCamelCase ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=_UpperCamelCase , max_length=data_args.max_seq_length , truncation=_UpperCamelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
snake_case_ : List[Any] = min(len(_UpperCamelCase ) , data_args.max_train_samples )
snake_case_ : int = train_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
snake_case_ : Optional[int] = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_UpperCamelCase ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
snake_case_ : List[str] = min(len(_UpperCamelCase ) , data_args.max_eval_samples )
snake_case_ : List[str] = eval_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
snake_case_ : List[str] = eval_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
snake_case_ : Union[str, Any] = min(len(_UpperCamelCase ) , data_args.max_predict_samples )
snake_case_ : Dict = predict_dataset.select(range(_UpperCamelCase ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
snake_case_ : List[str] = predict_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
snake_case_ : int = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase ):
snake_case_ : List[str] = p.predictions[0] if isinstance(p.predictions , _UpperCamelCase ) else p.predictions
snake_case_ : Tuple = np.argmax(_UpperCamelCase , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case_ : Optional[int] = default_data_collator
elif training_args.fpaa:
snake_case_ : Any = DataCollatorWithPadding(_UpperCamelCase , pad_to_multiple_of=8 )
else:
snake_case_ : Any = None
# Initialize our Trainer
snake_case_ : Any = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , )
# Training
if training_args.do_train:
snake_case_ : int = None
if training_args.resume_from_checkpoint is not None:
snake_case_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ : Dict = last_checkpoint
snake_case_ : int = trainer.train(resume_from_checkpoint=_UpperCamelCase )
snake_case_ : Union[str, Any] = train_result.metrics
snake_case_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
snake_case_ : Dict = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _UpperCamelCase )
trainer.save_metrics('''train''' , _UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case_ : Any = trainer.evaluate(eval_dataset=_UpperCamelCase )
snake_case_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCamelCase )
snake_case_ : str = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('''eval''' , _UpperCamelCase )
trainer.save_metrics('''eval''' , _UpperCamelCase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
snake_case_ , snake_case_ , snake_case_ : Optional[int] = trainer.predict(_UpperCamelCase , metric_key_prefix='''predict''' )
snake_case_ : Union[str, Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_UpperCamelCase )
)
snake_case_ : Optional[int] = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('''predict''' , _UpperCamelCase )
trainer.save_metrics('''predict''' , _UpperCamelCase )
snake_case_ : List[Any] = np.argmax(_UpperCamelCase , axis=1 )
snake_case_ : Optional[Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(_UpperCamelCase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_UpperCamelCase ):
snake_case_ : List[str] = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
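# Hedged example invocation (flag names come from the dataclass fields and TrainingArguments
# above; the values are illustrative):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --output_dir /tmp/xnli_de --overwrite_output_dir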
| 279
| 1
|
from datetime import datetime as dt
import os
from github import Github
A__ : List[str] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def UpperCamelCase( ):
lowerCAmelCase_ : Union[str, Any] = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCAmelCase_ : Tuple = g.get_repo('''huggingface/transformers''' )
lowerCAmelCase_ : int = repo.get_issues(state='''open''' )
for issue in open_issues:
        lowerCAmelCase_ : Optional[Any] = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
lowerCAmelCase_ : Tuple = comments[0] if len(__UpperCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
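
# A minimal sketch of the closing rule used in the first branch above (for
# illustration only; the label and bot-author checks are omitted here): an issue is
# auto-closed once it is 30+ days old and quiet for more than 7 days.
def _is_ready_to_close(updated_at: dt, created_at: dt, now: dt) -> bool:
    return (now - updated_at).days > 7 and (now - created_at).days >= 30


assert _is_ready_to_close(dt(2023, 1, 1), dt(2022, 11, 1), dt(2023, 1, 10))
assert not _is_ready_to_close(dt(2023, 1, 8), dt(2022, 11, 1), dt(2023, 1, 10))
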
if __name__ == "__main__":
main()
| 103
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
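
# How this test is meant to be launched (a sketch; the skipif guard above makes it a
# no-op otherwise). The test file path is an assumption based on where the SageMaker
# model-parallel tests usually live:
#   TEST_SAGEMAKER=True python -m pytest -sv tests/sagemaker/test_multi_node_model_parallel.py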
| 17
| 0
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
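
# A minimal extra check (a sketch for illustration): a node with only one child
# makes is_full_binary_tree() return False.
def _demo_not_full() -> None:
    not_full = Node(1)
    not_full.left = Node(2)  # no right sibling, so the tree is not full
    assert is_full_binary_tree(not_full) is False
    assert depth_of_tree(not_full) == 2


_demo_not_full()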
| 370
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solves Ax = b iteratively, starting from the initial guess init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
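
    # A short usage sketch: the system below is strictly diagonally dominant, so a
    # few Jacobi sweeps converge towards the solution of Ax = b.
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))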
| 49
| 0
|
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Output is 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
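
# A small illustration (a sketch for this file): NAND is functionally complete; for
# example, AND is a NAND followed by a NAND wired as an inverter.
def and_gate_from_nand(input_1: int, input_2: int) -> int:
    nand = nand_gate(input_1, input_2)
    return nand_gate(nand, nand)


assert and_gate_from_nand(1, 1) == 1
assert and_gate_from_nand(1, 0) == 0
assert and_gate_from_nand(0, 0) == 0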
| 77
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}


class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
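
# A quick usage sketch (illustrative values only): the kernel/stride tuples must
# have exactly `num_conv_layers` entries, otherwise __init__ raises.
#   config = MCTCTConfig()  # defaults describe the speechbrain/m-ctc-t-large architecture
#   MCTCTConfig(conv_kernel=(7, 7), num_conv_layers=1)  # raises ValueError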
| 269
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 361
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
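
# A minimal pure-Python sketch of the same quantity (for illustration only, not
# used by the metric above): per-pair CER is character edit distance over len(ref).
def _char_edit_distance(ref: str, hyp: str) -> int:
    # one-row dynamic-programming Levenshtein distance
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            prev, dp[j] = dp[j], min(dp[j] + 1, dp[j - 1] + 1, prev + (r != h))
    return dp[-1]


assert _char_edit_distance("kitten", "sitting") / len("kitten") == 0.5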
| 133
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None):
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
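
# A usage sketch (hedged: the image path and the PIL import are illustrative
# assumptions, not part of this module):
#   from PIL import Image
#   processor = MobileViTImageProcessor(size={"shortest_edge": 224})
#   batch = processor.preprocess(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 256, 256) after the 256x256 center crop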
| 51
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 175
| 0
|
def simplify(current_set: list[list]) -> list[list]:
    """Returns the row-reduced form used by solve_simultaneous()."""
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(current_first_column)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solves a system of n linear equations given as n lists of length n + 1."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
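
    # One more worked example (added for illustration): x + 2y = 3 and 2x + y = 3
    # intersect at x = y = 1.
    print(solve_simultaneous([[1, 2, 3], [2, 1, 3]]))  # -> [1.0, 1.0]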
| 360
|
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time recursion: try every length i for the first piece."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative bottom-up dynamic programming solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
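
    # A small extra check (a sketch for illustration): with the classic CLRS prices
    # [1, 5, 8, 9], the best revenue for a rod of length 4 is 10 (two pieces of length 2).
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10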
| 232
| 0
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string'),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/hendrycks/math',
            codebase_urls=['https://github.com/hendrycks/math'],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing inputs."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 331
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
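
# A usage sketch (an illustrative assumption; this test file only exercises update()
# directly): disjunctive constraints are normally consumed by constrained beam search.
#   word_ids = tokenizer(["Paris", "paris"], add_special_tokens=False).input_ids
#   model.generate(**inputs, constraints=[DisjunctiveConstraint(word_ids)], num_beams=4)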
| 331
| 1
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
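
# A quick sanity check (a sketch mirroring the Project Euler 29 statement): for
# 2 <= a <= 5 and 2 <= b <= 5 there are exactly 15 distinct terms.
assert solution(5) == 15
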
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 369
|
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 168
| 0
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n via trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
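
# A quick sanity check (a sketch mirroring the Project Euler 3 statement): the prime
# factors of 13195 are 5, 7, 13 and 29, so the largest is 29.
assert solution(13195) == 29
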
if __name__ == "__main__":
print(f'{solution() = }')
| 247
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 247
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 270
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 270
| 1
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
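# Usage note: run this file directly when collecting environment info for a bug
# report (the path depends on your checkout), e.g. `python print_env.py`.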
| 235
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCAmelCase_ :
"""simple docstring"""
pass
| 235
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
    """Salesforce/blip-vqa-capfilt-large""": (
        """https://huggingface.co/Salesforce/blip-vqa-capfilt-large/resolve/main/config.json"""
    ),
    """Salesforce/blip-image-captioning-base""": (
        """https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
    ),
    """Salesforce/blip-image-captioning-large""": (
        """https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
    ),
    """Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
    """Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
    """Salesforce/blip-itm-base-flickr""": """https://huggingface.co/Salesforce/blip-itm-base-flickr/resolve/main/config.json""",
    """Salesforce/blip-itm-large-flickr""": (
        """https://huggingface.co/Salesforce/blip-itm-large-flickr/resolve/main/config.json"""
    ),
}
class BlipTextConfig(PretrainedConfig):

    model_type = "blip_text_model"

    def __init__(self, vocab_size=30524, hidden_size=768, encoder_hidden_size=768, intermediate_size=3072, projection_dim=768, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=512, hidden_act="gelu", layer_norm_eps=1e-12, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, bos_token_id=30522, eos_token_id=2, pad_token_id=0, sep_token_id=102, is_decoder=True, use_cache=True, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipVisionConfig(PretrainedConfig):

    model_type = "blip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, image_size=384, patch_size=16, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=1e-10, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type") == "blip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipConfig(PretrainedConfig):

    model_type = "blip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, image_text_hidden_size=256, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.")

        self.text_config = BlipTextConfig(**text_config)
        self.vision_config = BlipVisionConfig(**vision_config)

        self.text_config.encoder_hidden_size = self.vision_config.hidden_size

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size

    @classmethod
    def from_text_vision_configs(cls, text_config: BlipTextConfig, vision_config: BlipVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
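
# A small composition sketch using only the classes defined above: the text
# model's encoder_hidden_size is tied to the vision tower's hidden size in
# BlipConfig.__init__, which is what the final print checks.
if __name__ == "__main__":
    _text = BlipTextConfig(num_hidden_layers=2)
    _vision = BlipVisionConfig(num_hidden_layers=2)
    _blip = BlipConfig.from_text_vision_configs(_text, _vision)
    print(_blip.text_config.encoder_hidden_size == _blip.vision_config.hidden_size)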
| 350
|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
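# Typical use of one of the re-exported collators (a sketch; requires a working
# transformers install and network access to download the tokenizer):
#   from transformers import AutoTokenizer
#   collate = DataCollatorWithPadding(AutoTokenizer.from_pretrained("bert-base-uncased"))
#   batch = collate([{"input_ids": [101, 102]}, {"input_ids": [101, 2023, 102]}])
#   # batch["input_ids"] is now a rectangular, padded tensor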
| 91
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is a (token_ids, length) pair; sequences are checked, split,
    and filtered on construction.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks"""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'''Splitting {sum(indices)} too long sequences.''')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''')

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''')

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f'''{len(self)} sequences''')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
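
# Minimal smoke test (a sketch: `params` may be any object exposing the attributes
# read above; real callers pass an argparse namespace, and the sibling `utils`
# module providing `logger` must be importable). The sequence is cls/sep framed
# (ids 1/2) and longer than 11 tokens, so it survives every filter.
if __name__ == "__main__":
    from types import SimpleNamespace

    _params = SimpleNamespace(
        max_model_input_size=32,
        mlm=True,
        special_tok_ids={"cls_token": 1, "sep_token": 2, "pad_token": 0, "unk_token": 3},
        is_master=True,
    )
    _ds = LmSeqsDataset(_params, [np.array([1, 5, 6, 7, 5, 6, 7, 5, 6, 7, 5, 6, 2])])
    print(_ds.batch_sequences([_ds[0]]))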
| 199
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_00_03
PYTHON_CODE = 5_00_02
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='base', keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='base', keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ['__java__', '__python__', '__en_XX__', '<mask>'])
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True), code, )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes='multi', keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'])
        code = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True), code, )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
    tgt_text = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes='base', src_lang='python', tgt_lang='en_XX')
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0003 )
    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_python_en_tokenizer_truncation(self):
        src_text = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__']), [5_0004, 5_0001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])
    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='pt', )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='java')
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'input_ids': [[150, 242, 2, 5_0003]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0001,
} , )
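# Convention encoded by the assertions above: PLBart source ids end with
# [eos (2), source language code] (e.g. [..., 2, PYTHON_CODE]), target labels end
# with [..., 2, EN_CODE], and decoder inputs are the labels shifted right so the
# language code lands in position 0.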
| 199
| 1
|
def binary_count_setbits(a: int) -> int:
    """
    Count the set bits (1s) in the binary representation of a positive integer.

    >>> binary_count_setbits(25)
    3
    >>> binary_count_setbits(36)
    2
    """
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(a).count("1")
if __name__ == "__main__":
import doctest
doctest.testmod()
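# Equivalent one-liner on Python 3.10+: (25).bit_count() == 3, without the
# bin()/str round-trip used above.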
| 354
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, F"""{split}.source"""), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, F"""{split}.target"""), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = SeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=2_0, max_target_length=trunc_target, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 2_0 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
def _lowercase ( self : str ):
__lowercase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowercase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCAmelCase__, UpperCAmelCase__, 1_2_8, UpperCAmelCase__ )
__lowercase = {x.name for x in tmp_dir.iterdir()}
__lowercase = {x.name for x in save_dir.iterdir()}
__lowercase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCAmelCase__ ) < len(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCAmelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq" )
def _lowercase ( self : List[str] ):
if not FAIRSEQ_AVAILABLE:
return
__lowercase ,__lowercase ,__lowercase = self._get_dataset(max_len=6_4 )
__lowercase = 6_4
__lowercase = ds.make_dynamic_sampler(UpperCAmelCase__, required_batch_size_multiple=UpperCAmelCase__ )
__lowercase = [len(UpperCAmelCase__ ) for x in batch_sampler]
assert len(set(UpperCAmelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) # no dropped or added examples
__lowercase = DataLoader(UpperCAmelCase__, batch_sampler=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2 )
__lowercase = []
__lowercase = []
for batch in data_loader:
__lowercase = batch["input_ids"].shape
__lowercase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__lowercase = np.product(batch["input_ids"].shape )
num_src_per_batch.append(UpperCAmelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCAmelCase__ )
assert num_src_per_batch[0] == max(UpperCAmelCase__ )
if failures:
raise AssertionError(F"""too many tokens in {len(UpperCAmelCase__ )} batches""" )
def _lowercase ( self : Any ):
__lowercase ,__lowercase ,__lowercase = self._get_dataset(max_len=5_1_2 )
__lowercase = 2
__lowercase = ds.make_sortish_sampler(UpperCAmelCase__, shuffle=UpperCAmelCase__ )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2 )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2, sampler=UpperCAmelCase__ )
__lowercase = tokenizer.pad_token_id
def count_pad_tokens(UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any]="input_ids" ):
return [batch[k].eq(UpperCAmelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(UpperCAmelCase__, k="labels" ) ) < sum(count_pad_tokens(UpperCAmelCase__, k="labels" ) )
assert sum(count_pad_tokens(UpperCAmelCase__ ) ) < sum(count_pad_tokens(UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : int=1_0_0_0, UpperCAmelCase__ : str=1_2_8 ):
if os.getenv("USE_REAL_DATA", UpperCAmelCase__ ):
__lowercase = "examples/seq2seq/wmt_en_ro"
__lowercase = max_len * 2 * 6_4
if not Path(UpperCAmelCase__ ).joinpath("train.len" ).exists():
save_len_file(UpperCAmelCase__, UpperCAmelCase__ )
else:
__lowercase = "examples/seq2seq/test_data/wmt_en_ro"
__lowercase = max_len * 4
save_len_file(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=UpperCAmelCase__, max_target_length=UpperCAmelCase__, n_obs=UpperCAmelCase__, )
return ds, max_tokens, tokenizer
def _lowercase ( self : Union[str, Any] ):
__lowercase ,__lowercase ,__lowercase = self._get_dataset()
__lowercase = set(DistributedSortishSampler(UpperCAmelCase__, 2_5_6, num_replicas=2, rank=0, add_extra_examples=UpperCAmelCase__ ) )
__lowercase = set(DistributedSortishSampler(UpperCAmelCase__, 2_5_6, num_replicas=2, rank=1, add_extra_examples=UpperCAmelCase__ ) )
assert idsa.intersection(UpperCAmelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
def _lowercase ( self : int, UpperCAmelCase__ : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__, use_fast=UpperCAmelCase__ )
if tok_name == MBART_TINY:
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", )
__lowercase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path="train", max_source_length=4, max_target_length=8, )
__lowercase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCAmelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCAmelCase__ ) == 0
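# Note on the sampler tests above: make_dynamic_sampler caps each batch at
# roughly max_tokens *source* tokens, so batch sizes vary while the total example
# count is preserved, and the sortish sampler only reduces padding, i.e.
# sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl)) per the asserts.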
| 144
| 0
|
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f'{len(dest_layers)} != {len(layers_to_copy)}'
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                f' {n_student}')
        return list(range(n_student))
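# e.g. pick_layers_to_copy(n_student=3, n_teacher=12) -> [0, 6, 11]: keep the
# first, a middle, and the last teacher layer, per the LAYERS_TO_COPY table above.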
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}')
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "student" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) ->Optional[int]:
a__: Optional[int] = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ).save_pretrained(_SCREAMING_SNAKE_CASE ) # purely for convenience
a__: Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE ).eval()
else:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'teacher must be a model or string got type {type(_SCREAMING_SNAKE_CASE )}'
a__: Optional[Any] = teacher.config.to_diff_dict()
try:
a__ , a__: Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
a__: Any = teacher_e
if d is None:
a__: Optional[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
a__ , a__: Optional[int] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
a__ , a__: Tuple = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
a__: int = teacher_e
if d is None:
a__: Any = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(_SCREAMING_SNAKE_CASE )
# Copy weights
a__: Union[str, Any] = teacher.config_class(**_SCREAMING_SNAKE_CASE )
a__: List[Any] = AutoModelForSeqaSeqLM.from_config(_SCREAMING_SNAKE_CASE )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
a__: Tuple = student.load_state_dict(teacher.state_dict() , strict=_SCREAMING_SNAKE_CASE )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
a__ , a__: str = list(range(_SCREAMING_SNAKE_CASE ) ), list(range(_SCREAMING_SNAKE_CASE ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(_SCREAMING_SNAKE_CASE )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
a__: Optional[int] = pick_layers_to_copy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if d_layers_to_copy is None:
a__: List[str] = pick_layers_to_copy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
try:
if hasattr(
_SCREAMING_SNAKE_CASE , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , _SCREAMING_SNAKE_CASE )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , _SCREAMING_SNAKE_CASE )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , _SCREAMING_SNAKE_CASE )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , _SCREAMING_SNAKE_CASE )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , _SCREAMING_SNAKE_CASE )
copy_layers(teacher.decoder.block , student.decoder.block , _SCREAMING_SNAKE_CASE )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
a__: Optional[int] = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(_SCREAMING_SNAKE_CASE )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
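# CLI sketch via fire (flag names mirror the function signature above; adjust the
# script path to your checkout):
#   python make_student.py facebook/bart-large-cnn student_dir --e 6 --d 3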
| 290
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
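# Migration sketch: existing call sites keep working (with a FutureWarning), but
# new code should instantiate the processor directly, e.g.
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")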
| 81
| 0
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def snake_case_ ( self : Union[str, Any] ):
pass # TODO add if relevant
def snake_case_ ( self : Union[str, Any] ):
pass # TODO add if relevant
def snake_case_ ( self : str ):
pass # TODO add if relevant
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : str = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase : Tuple = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(__a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(__a )
_UpperCAmelCase : Optional[Any] = 'こんにちは、世界。\nこんばんは、世界。'
_UpperCAmelCase : List[str] = tokenizer.tokenize(__a )
self.assertListEqual(__a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__a , "wb" ) as handle:
pickle.dump(__a , __a )
with open(__a , "rb" ) as handle:
_UpperCAmelCase : Any = pickle.load(__a )
_UpperCAmelCase : Dict = tokenizer_new.tokenize(__a )
self.assertListEqual(__a , __a )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : List[Any] = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def snake_case_ ( self : Optional[Any] ):
try:
_UpperCAmelCase : int = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def snake_case_ ( self : List[Any] ):
try:
_UpperCAmelCase : Dict = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def snake_case_ ( self : int ):
_UpperCAmelCase : Any = MecabTokenizer(do_lower_case=__a , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def snake_case_ ( self : Tuple ):
try:
_UpperCAmelCase : Union[str, Any] = MecabTokenizer(
do_lower_case=__a , normalize_text=__a , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : str = MecabTokenizer(normalize_text=__a , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(__a )
_UpperCAmelCase : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize(__a )
self.assertListEqual(__a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
_UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__a , "wb" ) as handle:
pickle.dump(__a , __a )
with open(__a , "rb" ) as handle:
_UpperCAmelCase : List[str] = pickle.load(__a )
_UpperCAmelCase : int = tokenizer_new.tokenize(__a )
self.assertListEqual(__a , __a )
@require_sudachi
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[Any] = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def snake_case_ ( self : str ):
_UpperCAmelCase : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : List[Any] = SudachiTokenizer(do_lower_case=__a , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Dict = SudachiTokenizer(normalize_text=__a , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[Any] = SudachiTokenizer(trim_whitespace=__a , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def snake_case_ ( self : Dict ):
_UpperCAmelCase : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(__a )
_UpperCAmelCase : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
_UpperCAmelCase : Any = tokenizer.tokenize(__a )
self.assertListEqual(__a , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__a , "wb" ) as handle:
pickle.dump(__a , __a )
with open(__a , "rb" ) as handle:
_UpperCAmelCase : List[str] = pickle.load(__a )
_UpperCAmelCase : Any = tokenizer_new.tokenize(__a )
self.assertListEqual(__a , __a )
@require_jumanpp
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Union[str, Any] = JumanppTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def snake_case_ ( self : str ):
_UpperCAmelCase : Dict = JumanppTokenizer(normalize_text=__a )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : List[Any] = JumanppTokenizer(trim_whitespace=__a )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def snake_case_ ( self : str ):
_UpperCAmelCase : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Any = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
_UpperCAmelCase : Any = {}
for i, token in enumerate(__a ):
_UpperCAmelCase : str = i
_UpperCAmelCase : List[Any] = WordpieceTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : int = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
_UpperCAmelCase : Dict = tokenizer.subword_tokenizer
_UpperCAmelCase : List[str] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(__a , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
_UpperCAmelCase : List[Any] = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(__a , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
_UpperCAmelCase : List[Any] = tokenizer.encode("ありがとう。" , add_special_tokens=__a )
_UpperCAmelCase : str = tokenizer.encode("どういたしまして。" , add_special_tokens=__a )
_UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text
def snake_case_ ( self : Any ):
pass # TODO add if relevant
def snake_case_ ( self : int ):
pass # TODO add if relevant
def snake_case_ ( self : List[Any] ):
pass # TODO add if relevant
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Any = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
_UpperCAmelCase : Any = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
__a , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : Tuple = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
_UpperCAmelCase : Union[str, Any] = {}
for i, token in enumerate(__a ):
_UpperCAmelCase : Union[str, Any] = i
_UpperCAmelCase : int = CharacterTokenizer(vocab=__a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
_UpperCAmelCase : List[str] = tokenizer.encode("ありがとう。" , add_special_tokens=__a )
_UpperCAmelCase : Union[str, Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=__a )
_UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__a )
_UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(__a , __a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoBertJapaneseTokenizerTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
        EXAMPLE_BERT_ID = 'bert-base-cased'
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 350
|
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """
    A generic heap that supports insertion, deletion and update of items, plus
    extraction of the top item, each in O(log(n)) time.
    """

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns the parent index of the given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns the left-child index of the given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns the right-child index of the given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using the default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns the index of a valid parent as per the desired ordering among the
        given index and both of its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top item pair [item, calculated value] if the heap is not empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """
        Returns the top item pair [item, calculated value] and removes it from the
        heap if present.
        """
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
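

# Minimal usage sketch for the Heap class above (illustrative values; not part of the
# original file). With the default key the largest key sits on top; pass
# key=lambda x: -x at construction for min-heap behaviour.
def _demo_heap() -> None:
    heap = Heap()
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    assert heap.get_top() == [7, 37]      # largest key is on top
    assert heap.extract_top() == [7, 37]  # pop it; the heap re-balances
    heap.update_item(5, 10)               # lower item 5's key in place
    assert heap.get_top() == [6, 31]
    heap.delete_item(6)
    assert heap.get_top() == [5, 10]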
| 202
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
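
    # A hypothetical invocation using the argparse defaults above (the script name and
    # the checkpoint paths are placeholders, not taken from the source):
    #
    #   python convert_blenderbot_checkpoint.py \
    #       --src_path blenderbot-model.bin \
    #       --save_dir hf_blenderbot \
    #       --hf_config_json blenderbot-3b-config.json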
| 163
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and whitespace).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding character slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
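

# Quick self-check with illustrative values (not part of the original file; call it
# manually, since the interactive prompt above already exercises the function):
def _demo_check_anagrams() -> None:
    assert check_anagrams("Silent", "Listen")
    assert check_anagrams("This is a string", "Is this a string")
    assert not check_anagrams("There", "Their")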
| 49
| 0
|
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def _snake_case ( _snake_case : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a = parser.parse_args()
a = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 271
|
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """
    Args:
        seed (`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with the module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
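

# Minimal usage sketch for EMAModel (an illustrative three-step loop; a real trainer
# would run forward/backward and optimizer.step() where indicated):
if __name__ == "__main__":
    net = torch.nn.Linear(4, 2)
    ema = EMAModel(net.parameters(), decay=0.9999)
    for _ in range(3):
        # ... forward / backward / optimizer.step() would go here ...
        ema.step(net.parameters())   # update the shadow (EMA) weights
    ema.store(net.parameters())      # stash the live training weights
    ema.copy_to(net.parameters())    # evaluate the model with the EMA weights
    ema.restore(net.parameters())    # put the live training weights back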
| 271
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 263
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 263
| 1
|
"""simple docstring"""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
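

# Minimal sanity check for shift_tokens_right above (illustrative values, not part of
# the original module): the decoder start token is prepended and any -100 label
# positions that survive the shift are replaced with the pad token id.
if __name__ == "__main__":
    example = jnp.array([[5, -100, 9, 2]])
    shifted = shift_tokens_right(example, pad_token_id=0, decoder_start_token_id=0)
    assert shifted.tolist() == [[0, 5, 0, 9]]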
| 362
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 57
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __lowerCamelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
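

# Minimal usage sketch (illustrative random input; the image processor class name is
# kept exactly as it appears above, since the original name is not recoverable):
if __name__ == "__main__":
    example_image = np.random.randint(0, 256, size=(3, 256, 256), dtype=np.uint8)
    image_processor = __lowerCamelCase()
    batch = image_processor(example_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)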
| 241
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 241
| 1
|
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations with ints of three different lengths."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
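

# Quick sanity checks for the three variants (illustrative values, not part of the
# original file):
def _demo_sum_of_digits() -> None:
    for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
        assert func(12345) == 15
        assert func(-12345) == 15  # the sign is discarded via abs()
        assert func(0) == 0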
| 282
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression given as a list of tokens."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
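

# Minimal usage sketch (illustrative token lists, not part of the original file).
# Division truncates toward zero, which is what the `a * b < 0` branch above
# implements on top of Python's floor division.
def _demo_evaluate_postfix() -> None:
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9   # (2 + 1) * 3
    assert evaluate_postfix(["4", "13", "5", "/", "+"]) == 6  # 4 + 13 / 5
    assert evaluate_postfix(["-7", "2", "/"]) == -3           # truncation toward zero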
| 282
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase : Tuple = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(lowerCamelCase)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 2
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 311
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = image_size
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Union[str, Any] = use_mask_token
UpperCAmelCase_ : Optional[int] = use_absolute_position_embeddings
UpperCAmelCase_ : Optional[int] = use_relative_position_bias
UpperCAmelCase_ : Union[str, Any] = use_shared_relative_position_bias
UpperCAmelCase_ : str = layer_scale_init_value
UpperCAmelCase_ : List[Any] = drop_path_rate
UpperCAmelCase_ : Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCAmelCase_ : Dict = out_indices
UpperCAmelCase_ : str = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase_ : Any = use_auxiliary_head
UpperCAmelCase_ : Union[str, Any] = auxiliary_loss_weight
UpperCAmelCase_ : Optional[int] = auxiliary_channels
UpperCAmelCase_ : List[Any] = auxiliary_num_convs
UpperCAmelCase_ : List[str] = auxiliary_concat_input
UpperCAmelCase_ : Any = semantic_loss_ignore_index
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Optional[int] =version.parse("1.11")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase__ ( self ):
return 1E-4
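# A short usage sketch (the argument values here are illustrative assumptions,
# not checkpoint defaults):
#
#   config = BeitConfig(image_size=384, drop_path_rate=0.2)
#   onnx_config = BeitOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})])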
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlm_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
A : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        expected_boxes = A  # the box-coordinate literal above

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Runs `steps` iterations of the Koch-snowflake construction."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replaces the middle third of every segment with the two sides of an
    equilateral triangle, turning each segment into four."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotates a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plots the vectors as a curve with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
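# A quick sanity check on sizes (derivable from iteration_step above): starting
# from 4 vectors (3 segments), every iteration turns each segment into 4, so
# iterate(INITIAL_VECTORS, 5) returns 3 * 4**5 + 1 = 3073 vectors.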
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validates that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Finds the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Counts the negative numbers using binary search on each row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Counts the negative numbers by checking every value."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Counts negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
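# Worked example for count_negatives_binary_search: with grid [[1, -1], [-1, -2]]
# (rows and columns sorted decreasingly), find_negative_index returns 1 for the
# first row and 0 for the second, so total = 1 and the result is
# 2 * 2 - 1 = 3 negatives -- matching the brute-force count.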
def benchmark() -> None:
    """Benchmarks the three counting functions, using the generated grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Finds the area of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
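# The count of sub-rectangles in an a x b grid is T(a) * T(b), where
# T(n) = n * (n + 1) / 2 is the n-th triangle number; e.g. a 3 x 2 grid
# contains T(3) * T(2) = 6 * 3 = 18 rectangles. For target = 2000000 the
# closest count is 1999998, from a 77 x 36 grid, giving an area of 2772.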
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
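# A minimal sketch of the LocalSGD idea itself (hypothetical pseudocode, not
# part of this script): every worker takes K purely local optimizer steps, then
# the workers average their parameters:
#
#   for step, batch in enumerate(train_dataloader):
#       loss = model(**batch).loss
#       loss.backward()
#       optimizer.step()
#       optimizer.zero_grad()
#       if (step + 1) % K == 0:  # synchronize every K batches
#           with torch.no_grad():
#               for p in model.parameters():
#                   torch.distributed.all_reduce(p.data, op=torch.distributed.ReduceOp.SUM)
#                   p.data /= torch.distributed.get_world_size()
#
# `accelerate.local_sgd.LocalSGD`, used below, wraps this bookkeeping for you.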
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
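# A short usage sketch (the values below are illustrative assumptions, not
# library defaults):
#
#   config = DistilBertConfig(n_layers=4, dim=256, hidden_dim=4 * 256)
#   onnx_config = DistilBertOnnxConfig(config, task="default")
#   onnx_config.inputs  # input_ids / attention_mask with dynamic batch and sequence axes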
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` becomes 0 in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
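# Example: f(a) * f(b) changes sign on [1, 1000] since f(1) = -6 and f(1000) > 0,
# and the only real root of x**3 - 2*x - 5 is approximately 2.0945515, so the
# print below outputs a value within 1e-7 of that root.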
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def A_ ( A__ , A__ , A__ , A__="attention" ) -> Union[str, Any]:
a__ : Any = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
a__ : Any = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
a__ : int = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
a__ : Optional[int] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
a__ : Any = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
a__ : Tuple = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
a__ : Union[str, Any] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
a__ : Tuple = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from a T5X-Flax checkpoint into an OrderedDict of PyTorch-style keys."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the converted T5X params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2) -> Any:
'''simple docstring'''
a__ : Tuple = bp_numa
a__ : Union[str, Any] = bp_numa
a__ : Optional[int] = bp_numa
a__ : Optional[int] = conva_get[:2]
a__ : Optional[Any] = conva_get[2]
a__ : Optional[int] = size_pa
a__ : Union[str, Any] = rate_w
a__ : Dict = rate_t
a__ : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a__ : Any = -2 * np.random.rand(self.conva[1]) + 1
a__ : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
a__ : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
def __lowercase ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__ : Optional[Any] = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(lowercase , 'wb') as f:
pickle.dump(lowercase , lowercase)
print(F'Model saved: {save_path}')
@classmethod
def __lowercase ( cls , lowercase) -> Any:
'''simple docstring'''
with open(lowercase , 'rb') as f:
a__ : Any = pickle.load(lowercase) # noqa: S301
a__ : Dict = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
a__ : Tuple = model_dic.get('size_pooling1')
a__ : Optional[int] = model_dic.get('num_bp1')
a__ : Tuple = model_dic.get('num_bp2')
a__ : Optional[Any] = model_dic.get('num_bp3')
a__ : Optional[Any] = model_dic.get('rate_weight')
a__ : int = model_dic.get('rate_thre')
# create model instance
a__ : Union[str, Any] = CNN(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase)
# modify model parameter
a__ : str = model_dic.get('w_conv1')
a__ : Optional[int] = model_dic.get('wkj')
a__ : Tuple = model_dic.get('vji')
a__ : str = model_dic.get('thre_conv1')
a__ : List[str] = model_dic.get('thre_bp2')
a__ : Tuple = model_dic.get('thre_bp3')
return conv_ins
def __lowercase ( self , lowercase) -> Any:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def __lowercase ( self , lowercase) -> Optional[int]:
'''simple docstring'''
return round(lowercase , 3)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : Union[str, Any] = convs[0]
a__ : Tuple = convs[1]
a__ : Any = np.shape(lowercase)[0]
# get the data slice of original image data, data_focus
a__ : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase):
a__ : str = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase)
# calculate the feature map of every single kernel, and saved as list of matrix
a__ : str = []
a__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(lowercase):
a__ : Tuple = []
for i_focus in range(len(lowercase)):
a__ : Optional[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase))
a__ : Dict = np.asmatrix(lowercase).reshape(
lowercase , lowercase)
data_featuremap.append(lowercase)
# expanding the data slice to One dimenssion
a__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase))
a__ : Optional[int] = np.asarray(lowercase)
return focus_list, data_featuremap
def __lowercase ( self , lowercase , lowercase , lowercase="average_pool") -> str:
'''simple docstring'''
a__ : Any = len(featuremaps[0])
a__ : int = int(size_map / size_pooling)
a__ : Optional[Any] = []
for i_map in range(len(lowercase)):
a__ : Any = featuremaps[i_map]
a__ : Optional[int] = []
for i_focus in range(0 , lowercase , lowercase):
for j_focus in range(0 , lowercase , lowercase):
a__ : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase))
a__ : List[str] = np.asmatrix(lowercase).reshape(lowercase , lowercase)
featuremap_pooled.append(lowercase)
return featuremap_pooled
def __lowercase ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
a__ : Any = []
for i in range(len(lowercase)):
a__ : Tuple = np.shape(data[i])
a__ : List[str] = data[i].reshape(1 , shapes[0] * shapes[1])
a__ : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase)
a__ : Union[str, Any] = np.asarray(lowercase)
return data_expanded
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
a__ : Dict = np.asarray(lowercase)
a__ : Optional[int] = np.shape(lowercase)
a__ : Any = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__ : int = []
a__ : Optional[int] = 0
for i_map in range(lowercase):
a__ : Optional[Any] = np.ones((size_map, size_map))
for i in range(0 , lowercase , lowercase):
for j in range(0 , lowercase , lowercase):
a__ : Union[str, Any] = pd_pool[
i_pool
]
a__ : Tuple = i_pool + 1
a__ : Optional[int] = np.multiply(
lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(lowercase)
return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of the image after the convolution / pooling stage,
        # so the intermediate representation can be inspected
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
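# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original file). The constructor
# arguments below are assumptions -- the real __init__ is defined earlier in
# this file -- but train() and predict() follow the signatures defined above.
#
#   cnn = CNN(...)  # build the network with the layer sizes from __init__
#   mse = cnn.train(
#       patterns=len(train_images),   # number of training patterns
#       datas_train=train_images,     # list of 2-D input arrays
#       datas_teach=train_labels,     # list of target vectors
#       n_repeat=100,                 # maximum number of epochs
#       error_accuracy=0.05,          # stop once the MSE drops below this
#       draw_e=True,                  # plot the MSE curve afterwards
#   )
#   predictions = cnn.predict(test_images)
# ---------------------------------------------------------------------------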
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        x_string_1 = tokenizer.decode(x_token_1)
        x_string_2 = tokenizer.decode(x_token_2)
        x_string_3 = tokenizer.decode(x_token_3)
        self.assertEqual(x_string_1, expected_text)
        self.assertEqual(x_string_2, expected_text)
        self.assertEqual(x_string_3, expected_text)
    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        x_token_1 = tokenizer(prefix_text + input_text).token_type_ids
        x_token_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        x_token_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(x_token_1, expected_mask_1)
        self.assertListEqual(x_token_2, expected_mask_2)
        self.assertListEqual(x_token_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token
    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_texts = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(x_texts, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(x_texts, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
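# Hedged illustration (added; not part of the original test file). The slow
# tests above exercise GPTSAN's prefix-LM interface: tokens passed via
# `prefix_text` form a bidirectionally-attended segment marked with
# token_type_ids == 1, while the causal text segment is marked 0. A minimal
# sketch, assuming the Tanrei/GPTSAN-japanese checkpoint is available:
#
#   tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#   encoded = tokenizer("いワ", prefix_text="あン")
#   print(encoded.input_ids)       # prefix and text are joined by a SEG token
#   print(encoded.token_type_ids)  # e.g. [1, 1, 1, 0, 0, ...]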
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
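# Note (added for illustration; not part of the original test): `replicate`
# copies the pipeline parameters to every device, while `shard` splits a
# batch's leading axis across devices. flax.training.common_utils.shard is
# roughly:
#
#   def shard(xs):
#       local_device_count = jax.local_device_count()
#       return jax.tree_util.tree_map(
#           lambda x: x.reshape((local_device_count, -1) + x.shape[1:]), xs
#       )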
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # rename HF Diffusers keys back to their stable-diffusion names
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
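# Quick sanity check of the renaming logic (added for illustration; not part
# of the original script): a single HF Diffusers key is mapped back to its
# stable-diffusion name via the first entry of unet_conversion_map above.
#
#   dummy = {"time_embedding.linear_1.weight": torch.zeros(1)}
#   assert "time_embed.0.weight" in convert_unet_state_dict(dummy)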
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv weights
    return w.reshape(*w.shape, 1, 1)
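# Shape check (added for illustration; not part of the original script): a
# 2-D linear weight gains two trailing singleton dims so it can act as a
# 1x1 convolution.
#
#   w = torch.randn(512, 512)
#   reshape_weight_for_sd(w).shape  # torch.Size([512, 512, 1, 1])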
def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCAmelCase__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ = load_file(unet_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCAmelCase__ = load_file(vae_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCAmelCase__ = load_file(text_enc_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCAmelCase__ = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
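# Typical invocation (added for illustration; the script filename below is its
# upstream name and the paths are placeholders):
#
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors \
#       --half --use_safetensors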
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
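# Example invocation (added for illustration; the script filename is assumed,
# and the --checkpoint_url default above already points at the base DiT
# weights):
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base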
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the Euclidean gcd algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
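    # Illustrative cross-check (added; not part of the original file): both
    # implementations agree with math.gcd on random inputs.
    import math
    import random

    for _ in range(1000):
        a, b = random.randrange(1, 10**6), random.randrange(1, 10**6)
        assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)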
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def get_maskformer_config(model_name):
backbone_config = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
)
config = MaskFormerConfig(backbone_config=backbone_config)
repo_id = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
config.num_labels = 847
filename = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
config.num_labels = 150
filename = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
config.num_labels = 171
filename = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
config.num_labels = 133
filename = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
config.num_labels = 19
filename = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
config.num_labels = 65
filename = "mapillary-vistas-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
return config
def create_rename_keys(config):
rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""")
            in_proj_bias = state_dict.pop(f"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[:hidden_size, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size:, :]
        state_dict[f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> "Image.Image":
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"""nielsr/{model_name}""")
        image_processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
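# Example invocation (illustrative; the script filename is an assumption, the
# flags are the ones defined above):
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade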
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    # Write the message column by column: column `col` collects every
    # key-th character starting at offset `col`.
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
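    # Illustrative self-check (added; not part of the original script): the
    # classic key-8 example round-trips through the helpers above.
    assert encrypt_message(8, "Common sense is not so common.") == "Cenoonommstmme oo snnio. s s c"
    assert decrypt_message(8, "Cenoonommstmme oo snnio. s s c") == "Common sense is not so common."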
main()
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """A priority queue keyed by weight, backed by a binary min-heap."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """An undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
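# Minimal usage sketch (added for illustration; not part of the original file):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 3)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    dist, parent = prims_algo(g)
    print(parent)  # the spanning tree: parent[2] == 1 and parent[3] == 2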
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a pytorch BertModel state dict into a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"""bert/{name}"""

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                # dense kernels are stored transposed in TF relative to PyTorch
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
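# Illustrative invocation (the script filename is an assumption; the flags are
# the ones defined above):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt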
if __name__ == "__main__":
main()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration class for the Swinv2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
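# Quick sanity check (illustrative, not part of the original module): with the
# defaults above, embed_dim=96 and len(depths)=4, so
# hidden_size = int(96 * 2 ** 3) = 768.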
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def snake_case_ ( self : Optional[int] ):
__lowercase : Optional[Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
__lowercase : Any = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-0_5, '''token''': 3_8015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-0_5, '''token''': 2_5506, '''token_str''': ''' accuser'''},
] , )
__lowercase : Any = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-0_5,
'''token''': 3_8015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-0_5,
'''token''': 2_5506,
'''token_str''': ''' accuser''',
},
] , )
__lowercase : Union[str, Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-0_5, '''token''': 1_3606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-0_5, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-0_5, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def snake_case_ ( self : str ):
__lowercase : Union[str, Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
__lowercase : Any = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-0_5, '''token''': 3_5676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-0_5, '''token''': 1_6416, '''token_str''': '''ELS'''},
] , )
__lowercase : Any = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-0_5,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-0_5, '''token''': 1_6416, '''token_str''': '''ELS'''},
] , )
__lowercase : Union[str, Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-0_5, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-0_5, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-0_5, '''token''': 1_3606, '''token_str''': ''' Clara'''},
] , )
__lowercase : List[str] = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
[
{
'''score''': 2.2E-0_5,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-0_5, '''token''': 1_6416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-0_5,
'''token''': 3_5676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-0_5, '''token''': 1_6416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def snake_case_ ( self : Optional[Any] ):
__lowercase : int = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
__lowercase : Dict = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_torch
def snake_case_ ( self : List[str] ):
__lowercase : str = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(__snake_case )
@slow
@require_tf
def snake_case_ ( self : str ):
__lowercase : int = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(__snake_case )
    def run_large_test(self, unmasker):
__lowercase : Any = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
__lowercase : Optional[int] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 1_2790,
'''token_str''': ''' Lyon''',
},
] , )
__lowercase : int = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 1_3606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def snake_case_ ( self : Optional[Any] ):
__lowercase : Union[str, Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
__lowercase : str = None
__lowercase : Optional[int] = None
self.run_pipeline_test(__snake_case , [] )
@require_tf
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
def snake_case_ ( self : Any , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : List[Any] ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
__lowercase : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
__lowercase : List[str] = [
F'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def snake_case_ ( self : Tuple , _snake_case : Tuple , _snake_case : str ):
__lowercase : Union[str, Any] = fill_masker.tokenizer
__lowercase : Dict = fill_masker.model
__lowercase : Optional[int] = fill_masker(
F'This is a {tokenizer.mask_token}' , )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
__lowercase : List[str] = fill_masker([F'This is a {tokenizer.mask_token}'] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
__lowercase : Any = fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
with self.assertRaises(__snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__snake_case ):
fill_masker('''This is''' )
self.run_test_top_k(__snake_case , __snake_case )
self.run_test_targets(__snake_case , __snake_case )
self.run_test_top_k_targets(__snake_case , __snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(__snake_case , __snake_case )
self.fill_mask_with_multiple_masks(__snake_case , __snake_case )
def snake_case_ ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Dict ):
__lowercase : Optional[int] = tokenizer.get_vocab()
__lowercase : int = sorted(vocab.keys() )[:2]
# Pipeline argument
__lowercase : int = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , targets=__snake_case )
__lowercase : List[Any] = fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
__lowercase : Optional[int] = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __snake_case )
__lowercase : Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__snake_case ) )
# Call argument
__lowercase : List[str] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
__lowercase : str = fill_masker(F'This is a {tokenizer.mask_token}' , targets=__snake_case )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
__lowercase : Optional[Any] = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , __snake_case )
__lowercase : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(__snake_case ) )
# Score equivalence
__lowercase : List[str] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=__snake_case )
__lowercase : Tuple = [top_mask['''token_str'''] for top_mask in outputs]
__lowercase : int = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ) == set(__snake_case ):
__lowercase : Dict = fill_masker(F'This is a {tokenizer.mask_token}' , targets=__snake_case )
__lowercase : int = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
# Raises with invalid
with self.assertRaises(__snake_case ):
__lowercase : Optional[int] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__snake_case ):
__lowercase : Optional[int] = fill_masker(F'This is a {tokenizer.mask_token}' , targets=[''''''] )
with self.assertRaises(__snake_case ):
__lowercase : Optional[int] = fill_masker(F'This is a {tokenizer.mask_token}' , targets='''''' )
def snake_case_ ( self : int , _snake_case : str , _snake_case : Union[str, Any] ):
__lowercase : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case , top_k=2 )
__lowercase : Tuple = fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
__lowercase : List[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
__lowercase : Union[str, Any] = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def snake_case_ ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Tuple ):
__lowercase : List[Any] = tokenizer.get_vocab()
__lowercase : int = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
# top_k=2, ntargets=3
__lowercase : Any = sorted(vocab.keys() )[:3]
__lowercase : str = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=2 , targets=__snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
__lowercase : int = [el['''token_str'''] for el in sorted(__snake_case , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ).issubset(__snake_case ):
__lowercase : Optional[Any] = fill_masker(F'This is a {tokenizer.mask_token}' , top_k=3 , targets=__snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__snake_case ) , nested_simplify(__snake_case ) )
def snake_case_ ( self : Tuple , _snake_case : List[str] , _snake_case : Optional[Any] ):
__lowercase : Optional[Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
__lowercase : Optional[Any] = tokenizer.get_vocab()
# String duplicates + id duplicates
__lowercase : List[Any] = sorted(vocab.keys() )[:3]
__lowercase : Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__lowercase : Union[str, Any] = fill_masker(F'My name is {tokenizer.mask_token}' , targets=__snake_case , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__snake_case ) , 3 )
def snake_case_ ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : List[Any] ):
__lowercase : Dict = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
__lowercase : int = fill_masker(
F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' , top_k=2 )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Check whether a graph (adjacency list over vertices 0..n-1) is bipartite,
    using DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
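# Illustrative counter-example (added; not in the original file): an odd cycle
# cannot be 2-colored, so the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False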
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/deit-base-distilled-patch16-224''': (
        '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = 'deit'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    '''
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    '''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
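# Minimal usage sketch (illustrative; `waveform` stands for any raw audio array
# that WhisperProcessor accepts — it is an assumption, not part of the module):
#
#     tool = SpeechToTextTool()
#     print(tool(waveform))  # -> transcribed text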
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """nielsr/canine-s""": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""Construct a CANINE tokenizer (i.e. a character splitter)."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(F"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(F"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file; there is nothing to save.
        return ()
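# Usage sketch (illustrative, not part of the original module): CANINE tokenizes
# straight to Unicode codepoints, with [CLS]/[SEP] drawn from the private-use
# area defined above, e.g.
#
#     tok = CanineTokenizer()
#     tok("hi")["input_ids"]  # -> [0xE000, 104, 105, 0xE001]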
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class SCREAMING_SNAKE_CASE_ ( __a , __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : List[str] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__lowercase : Any = (TFOPTForCausalLM,) if is_tf_available() else ()
__lowercase : int = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__lowercase : List[str] = False
__lowercase : Union[str, Any] = False
__lowercase : Tuple = False
__lowercase : Union[str, Any] = 10
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFOPTModelTester(self)
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__)
def snake_case_ ( self):
self.config_tester.run_common_tests()
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase__ , lowerCAmelCase__):
if hasattr(lowerCAmelCase__ , """weight"""):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase__ , """weight"""):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
__SCREAMING_SNAKE_CASE = model_class(config=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings())
__SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings())
__SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
__SCREAMING_SNAKE_CASE = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase__)
# check that weights remain the same after resizing
__SCREAMING_SNAKE_CASE = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
__SCREAMING_SNAKE_CASE = False
self.assertTrue(lowerCAmelCase__)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0:
__SCREAMING_SNAKE_CASE = False
self.assertTrue(lowerCAmelCase__)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
__lowercase : Union[str, Any] = 99
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tf.ones((4, 1) , dtype=tf.intaa) * 2
__SCREAMING_SNAKE_CASE = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1)
__SCREAMING_SNAKE_CASE = input_ids.shape[0]
__SCREAMING_SNAKE_CASE = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFOPTModel.from_pretrained("""facebook/opt-350m""")
        __SCREAMING_SNAKE_CASE = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
__SCREAMING_SNAKE_CASE = tf.not_equal(lowerCAmelCase__ , model.config.pad_token_id)
with tf.GradientTape():
__SCREAMING_SNAKE_CASE = model(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__).last_hidden_state
        __SCREAMING_SNAKE_CASE = (1, 11, 512)
        self.assertEqual(output.shape , lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-3))
__SCREAMING_SNAKE_CASE = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = xla_generate(lowerCAmelCase__ , lowerCAmelCase__)[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-2))
@require_tf
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
super().setUp()
__SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(self.path_model)
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(self.path_model)
__SCREAMING_SNAKE_CASE = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors="""tf""" , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
        __SCREAMING_SNAKE_CASE = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4))
__SCREAMING_SNAKE_CASE = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4))
@require_tf
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case_ ( self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """facebook/opt-125m"""
__SCREAMING_SNAKE_CASE = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__)
for prompt in self.prompts:
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors="""tf""").input_ids
__SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , max_length=1_0)
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """left"""
# use different length sentences to test batching
__SCREAMING_SNAKE_CASE = [
"""Hello, my dog is a little""",
"""Today, I""",
]
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors="""tf""" , padding=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = inputs["""input_ids"""]
__SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs["""attention_mask"""])
__SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="""tf""").input_ids
__SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa))
__SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="""tf""").input_ids
__SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings)
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
__SCREAMING_SNAKE_CASE = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__)
for prompt in self.prompts:
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors="""tf""").input_ids
__SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , max_length=1_0)
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
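# Hedged end-to-end sketch of the greedy-generation pattern the slow tests above exercise.
# The checkpoint and prompt come from the tests; wrapping it in a function keeps anything
# heavy from running at import time.
if is_tf_available():
    def _demo_opt_generate(prompt="Today is a beautiful day and I want to", model_id="facebook/opt-125m"):
        tok = GPTaTokenizer.from_pretrained(model_id)  # tokenizer class as imported above
        lm = TFOPTForCausalLM.from_pretrained(model_id)
        ids = tok(prompt, return_tensors="tf").input_ids
        return tok.batch_decode(lm.generate(ids, max_length=10), skip_special_tokens=True)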
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__magic_name__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
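# Self-contained sketch of the split performed above: PyTorch's nn.MultiheadAttention stores
# a fused (3*d, d) in_proj matrix whose row blocks [0:d], [d:2d], [2d:3d] are q, k, v.
# d = 4 here is a toy size standing in for the real hidden size of 256.
_d = 4
_fused = torch.arange(3 * _d * _d, dtype=torch.float32).reshape(3 * _d, _d)
_q, _k, _v = _fused[:_d, :], _fused[_d : 2 * _d, :], _fused[-_d:, :]
assert _q.shape == _k.shape == _v.shape == (_d, _d)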
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
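# Hedged sketch of the preprocessing pair above on a synthetic image, so no download is
# needed; the string "detection" stands in for a checkpoint URL and selects the 800px target.
_demo_image = Image.new("RGB", (400, 200))
_demo_pixels = normalize(resize(_demo_image, "detection"))  # tensor of shape (3, 400, 800)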
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
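# Hedged example of running this conversion from the shell; the script filename is
# illustrative, while the flags match the argparse definitions below:
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection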
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__magic_name__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
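    # Worked example for the function above (an illustrative 3-4-5 case):
    # with R = 3 and X = 4, |Z| = sqrt(3**2 + 4**2) = 5.
    assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}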
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """timesformer"""
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
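# Hedged usage sketch: instantiate the config above with one field overridden; every other
# value falls back to the defaults in the signature.
_demo_config = SCREAMING_SNAKE_CASE__(num_frames=16)
assert _demo_config.num_frames == 16 and _demo_config.attention_type == "divided_space_time"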
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowercase_ , """embed_dim"""))
self.parent.assertTrue(hasattr(lowercase_ , """num_heads"""))
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=6_4 , lowerCAmelCase__=3 , lowerCAmelCase__=[1_6, 4_8, 9_6] , lowerCAmelCase__=[1, 3, 6] , lowerCAmelCase__=[1, 2, 1_0] , lowerCAmelCase__=[7, 3, 3] , lowerCAmelCase__=[4, 2, 2] , lowerCAmelCase__=[2, 1, 1] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[False, False, True] , lowerCAmelCase__=[0.0, 0.0, 0.0] , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_sizes
__SCREAMING_SNAKE_CASE = patch_stride
__SCREAMING_SNAKE_CASE = patch_padding
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = stride_kv
__SCREAMING_SNAKE_CASE = depth
__SCREAMING_SNAKE_CASE = cls_token
__SCREAMING_SNAKE_CASE = attention_drop_rate
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
# create a random int32 tensor of given shape
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels)
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TFCvtModel(config=lowercase_)
__SCREAMING_SNAKE_CASE = model(lowercase_ , training=lowercase_)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for i in range(len(self.depth)):
__SCREAMING_SNAKE_CASE = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
__SCREAMING_SNAKE_CASE = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFCvtForImageClassification(lowercase_)
__SCREAMING_SNAKE_CASE = model(lowercase_ , labels=lowercase_ , training=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowercase : Tuple = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__lowercase : Any = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
__lowercase : List[str] = False
__lowercase : int = False
__lowercase : Dict = False
__lowercase : Dict = False
__lowercase : Optional[int] = False
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFCvtModelTester(self)
__SCREAMING_SNAKE_CASE = TFCvtConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7)
def snake_case_ ( self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""")
def snake_case_ ( self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def snake_case_ ( self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def snake_case_ ( self):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""")
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tf.keras.mixed_precision.Policy("""mixed_float16""")
tf.keras.mixed_precision.set_global_policy(lowercase_)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""")
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowercase_)
__SCREAMING_SNAKE_CASE = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_)
def snake_case_ ( self):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = model_class(lowercase_)
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowercase_ , lowercase_))
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = len(self.model_tester.depth)
self.assertEqual(len(lowercase_) , lowercase_)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def snake_case_ ( self):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFCvtModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowercase_ , return_tensors="""tf""")
# forward pass
__SCREAMING_SNAKE_CASE = model(**lowercase_)
# verify the logits
        __SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape , lowercase_)
        __SCREAMING_SNAKE_CASE = tf.constant([0.9285, 0.9015, -0.3150])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowercase_ , atol=1E-4))
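# Hedged inference sketch mirroring the integration test above; wrapped in a function so
# nothing heavy runs at import time, and guarded like the imports at the top of the file.
if is_tf_available() and is_vision_available():
    def _demo_cvt_logits():
        processor = AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        inputs = processor(images=prepare_img(), return_tensors="tf")
        return model(**inputs).logits  # expected shape: (1, 1000)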
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
from manim import *
class lowerCAmelCase__( __lowercase ):
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE : Dict = Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE : List[str] = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Any = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Any = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Optional[int] = Text("CPU" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : List[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = [mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE : Dict = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Optional[int] = Text("GPU" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : List[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Dict = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : List[str] = Text("Model" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : Dict = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = []
_SCREAMING_SNAKE_CASE : int = []
_SCREAMING_SNAKE_CASE : Tuple = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
model_cpu_arr.append(__lowerCamelCase )
self.add(*__lowerCamelCase , *__lowerCamelCase , *__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : Optional[int] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Any = Text("Loaded Checkpoint" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : Any = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : List[str] = []
for i, rect in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
ckpt_arr.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCamelCase )
self.add(*__lowerCamelCase , *__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE : List[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE : int = [meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : List[Any] = [meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE : int = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : str = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
_SCREAMING_SNAKE_CASE : Tuple = Text("Disk" , font_size=2_4 )
_SCREAMING_SNAKE_CASE : Dict = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowerCamelCase , run_time=3 ) , Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = []
for i, rect in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(FadeOut(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Union[str, Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , *__lowerCamelCase ) , )
self.wait()
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'BlipImageProcessor'
__snake_case = 'AutoTokenizer'
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
# add QFormer tokenizer
_SCREAMING_SNAKE_CASE : List[str] = qformer_tokenizer
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = True , __lowerCamelCase = False , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = 0 , __lowerCamelCase = None , __lowerCamelCase = None , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , **__lowerCamelCase , ) -> BatchFeature:
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
_SCREAMING_SNAKE_CASE : Any = BatchFeature()
if text is not None:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
encoding.update(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self.qformer_tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : str = qformer_text_encoding.pop("input_ids" )
_SCREAMING_SNAKE_CASE : List[Any] = qformer_text_encoding.pop("attention_mask" )
if images is not None:
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
encoding.update(__lowerCamelCase )
return encoding
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase_ ( self , *__lowerCamelCase , **__lowerCamelCase ) -> str:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self , __lowerCamelCase , **__lowerCamelCase ) -> Any:
if os.path.isfile(__lowerCamelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = os.path.join(__lowerCamelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(__lowerCamelCase )
return super().save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def UpperCamelCase_ ( cls , __lowerCamelCase , **__lowerCamelCase ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase , subfolder="qformer_tokenizer" )
_SCREAMING_SNAKE_CASE : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCamelCase , **__lowerCamelCase )
args.append(__lowerCamelCase )
return cls(*__lowerCamelCase )
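# Hedged sketch of the save/load round trip implemented above: `save_pretrained` writes the
# QFormer tokenizer into a "qformer_tokenizer" subfolder, and `from_pretrained` restores it.
# The directory name below is an illustrative placeholder; the class name is as defined above.
def _demo_processor_roundtrip(processor, tmp_dir="./_demo_processor"):
    processor.save_pretrained(tmp_dir)
    return lowerCAmelCase__.from_pretrained(tmp_dir)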
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
A_ = logging.get_logger(__name__)
class _snake_case ( _a ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,**SCREAMING_SNAKE_CASE__ : Optional[int] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead." ,SCREAMING_SNAKE_CASE__ ,)
super().__init__(args=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _a , unittest.TestCase ):
_A : str = CTRLTokenizer
_A : List[str] = False
_A : int = False
def __UpperCamelCase ( self : Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE:Dict = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
SCREAMING_SNAKE_CASE:Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE:str = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
SCREAMING_SNAKE_CASE:Union[str, Any] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE:Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE:Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE__ ) )
def __UpperCamelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Any ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:Optional[Any] = "adapt react readapt apt"
SCREAMING_SNAKE_CASE:Tuple = "adapt react readapt apt"
return input_text, output_text
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:List[str] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE:Any = "adapt react readapt apt"
SCREAMING_SNAKE_CASE:Any = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
SCREAMING_SNAKE_CASE:Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE:Optional[int] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) ,SCREAMING_SNAKE_CASE__ )
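# Hedged usage sketch of the toy vocabulary above outside the unittest harness: build the
# tokenizer directly from the two files and tokenize one in-vocab word.
def _demo_ctrl_tokenize(vocab_file, merges_file):
    tok = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
    return tok.tokenize("adapt")  # ["adapt"]: the merges above assemble the full word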
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
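# Worked check of the predicate above: for k = 2, sqrt(4*2 + 1) = 3, so the value inside
# log2 is 3/2 + 1/2 = 2 and the exponent is exactly 1, hence a perfect partition.
assert check_partition_perfect(2)
assert not check_partition_perfect(3)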
if __name__ == "__main__":
print(f'{solution() = }')
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase : List[str] =logging.get_logger(__name__)
_lowercase : List[str] ={
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : Optional[Any] ={
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_lowercase : int ={"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """simple docstring"""
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs(word: tuple) -> set:
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
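# Quick self-contained check of the helper above: adjacent-symbol pairs of a toy "word".
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}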
class snake_case__ (A__ ):
"""simple docstring"""
__lowerCAmelCase :str = VOCAB_FILES_NAMES
__lowerCAmelCase :int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase :Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ) -> List[Any]:
"""simple docstring"""
a__ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token
a__ : Optional[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token
a__ : Tuple = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token
a__ : Any = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token
a__ : List[Any] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token
a__ : str = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a__ : List[str] = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
super().__init__(
errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , )
with open(__lowercase , encoding="""utf-8""" ) as vocab_handle:
a__ : str = json.load(__lowercase )
a__ : Dict = {v: k for k, v in self.encoder.items()}
a__ : Any = errors # how to handle errors in decoding
a__ : Union[str, Any] = bytes_to_unicode()
a__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__lowercase , encoding="""utf-8""" ) as merges_handle:
a__ : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
a__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
a__ : str = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
a__ : Optional[Any] = {}
a__ : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a__ : Union[str, Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """simple docstring"""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        """simple docstring"""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
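# --- Illustrative sketch (not part of the original file) ---
# A minimal, dependency-free demo of the greedy merge loop that `bpe` above
# implements: repeatedly merge the lowest-ranked adjacent pair. The tiny merge
# table below is hypothetical; a real tokenizer loads ranks from a merges file.
def toy_bpe(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

# "hello" -> "h e ll o" when only the ("l", "l") merge exists
assert toy_bpe("hello", {("l", "l"): 0}) == "h e ll o"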
| 266
| 0
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """simple docstring"""
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        """simple docstring"""
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='''MRA does not output attentions''')
    def test_attention_outputs(self):
        """simple docstring"""
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
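# --- Illustrative usage (not part of the original file) ---
# Running just these tests with pytest (the path below is hypothetical and
# depends on where this file lives in the repository):
#
#   python -m pytest tests/models/mra/test_modeling_mra.py -k "MraModelTest"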
| 121
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''')

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, '''func'''):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
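# --- Illustrative sketch (not part of the original file) ---
# main() dispatches via the `func` attribute that each subparser registers with
# set_defaults(func=...); the same pattern reduced to a standalone example
# (the "hello" subcommand below is hypothetical):
def _demo_dispatch():
    parser = argparse.ArgumentParser(prog="demo")
    subcommands = parser.add_subparsers(title="subcommands", dest="subcommand")
    hello_parser = subcommands.add_parser("hello")
    hello_parser.set_defaults(func=lambda args: print("hello"))
    args = parser.parse_args(["hello"])
    args.func(args)  # prints "hello"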
if __name__ == "__main__":
main()
| 121
| 1
|
'''simple docstring'''
def gcd(a, b):
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a, m):
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
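# --- Illustrative usage (not part of the original file) ---
# find_mod_inverse(a, m) returns the x in [0, m) with (a * x) % m == 1, which
# exists exactly when gcd(a, m) == 1. For example 7 * 15 == 105 == 4 * 26 + 1,
# so 15 is the inverse of 7 modulo 26:
assert gcd(7, 26) == 1
assert find_mod_inverse(7, 26) == 15
assert (7 * find_mod_inverse(7, 26)) % 26 == 1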
| 358
|
'''simple docstring'''
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
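# --- Illustrative sketch (not part of the original file) ---
# actual_power above recurses twice per halving, so it still performs O(b)
# multiplications. Squaring a single recursive result gives the usual
# O(log b) exponentiation by squaring; a minimal alternative for comparison:
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half

assert fast_power(-2, -3) == power(-2, -3) == -0.125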
if __name__ == "__main__":
print(power(-2, -3))
| 280
| 0
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        '''simple docstring'''
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        '''simple docstring'''
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        '''simple docstring'''
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('''CPU''')
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type='''CPU''')
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
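# --- Illustrative sketch (not part of the original test file) ---
# The behavior under test, re-implemented without TensorFlow: an accumulator
# sums gradients elementwise across steps and is applied/reset as one unit.
class ToyAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [0.0] * len(grads)
        if len(grads) != len(self.gradients):
            raise ValueError(f"Expected {len(self.gradients)} gradients, got {len(grads)}")
        self.gradients = [g + new for g, new in zip(self.gradients, grads)]
        self.step += 1

    def reset(self):
        self.step = 0
        self.gradients = [0.0] * len(self.gradients)

_acc = ToyAccumulator()
for _grads in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    _acc(_grads)
assert _acc.step == 3 and _acc.gradients == [-2.0, 5.0]  # same totals the test expects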
| 6
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    '''simple docstring'''

    mode = '''summarization'''
    loss_names = ['''loss''']
    metric_names = ROUGE_KEYS
    default_val_metric = '''rouge2'''
    def __init__(self, hparams, **kwargs):
        """simple docstring"""
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""")
            if hparams.sortish_sampler:
                raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, """summarization""")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / """metrics.json"""
        self.hparams_save_path = Path(self.output_dir) / """hparams.pkl"""
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            """train""": self.hparams.n_train,
            """val""": self.hparams.n_val,
            """test""": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            """train""": self.hparams.max_target_length,
            """val""": self.hparams.val_max_target_length,
            """test""": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["""repo_sha"""]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, """prepare_seq2seq_batch""") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """simple docstring"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if """mask""" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / """text_batch.json""")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / """tok_batch.json""")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        """simple docstring"""
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """simple docstring"""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        """simple docstring"""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["""input_ids"""], batch["""attention_mask"""]
        tgt_ids = batch["""labels"""]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["""decoder_input_ids"""] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["""logits"""]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        """simple docstring"""
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        """simple docstring"""
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["""input_ids"""].ne(self.pad).sum() + batch["""labels"""].ne(self.pad).sum()
        logs["bs"] = batch["""input_ids"""].shape[0]
        logs["src_pad_tok"] = batch["""input_ids"""].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["""input_ids"""].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        """simple docstring"""
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        """simple docstring"""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["""loss"""]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["""preds"""] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            F"{prefix}_loss": loss,
            F"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        """simple docstring"""
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        """simple docstring"""
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["""input_ids"""],
            attention_mask=batch["""attention_mask"""],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["""input_ids"""].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["""labels"""])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        """simple docstring"""
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        """simple docstring"""
        return self.validation_epoch_end(outputs, prefix="""test""")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        """simple docstring"""
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs,
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """simple docstring"""
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        """simple docstring"""
        dataloader = self.get_dataloader("""train""", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        """simple docstring"""
        return self.get_dataloader("""val""", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        """simple docstring"""
        return self.get_dataloader("""test""", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            """--max_source_length""", default=1024, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ),
        )
        parser.add_argument(
            """--max_target_length""", default=56, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ),
        )
        parser.add_argument(
            """--val_max_target_length""", default=142, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ),
        )
        parser.add_argument(
            """--test_max_target_length""", default=142, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ),
        )
        parser.add_argument("""--freeze_encoder""", action="""store_true""")
        parser.add_argument("""--freeze_embeds""", action="""store_true""")
        parser.add_argument("""--sortish_sampler""", action="""store_true""", default=False)
        parser.add_argument("""--overwrite_output_dir""", action="""store_true""", default=False)
        parser.add_argument("""--max_tokens_per_batch""", type=int, default=None)
        parser.add_argument("""--logger_name""", type=str, choices=["""default""", """wandb""", """wandb_shared"""], default="""default""")
        parser.add_argument("""--n_train""", type=int, default=-1, required=False, help="""# examples. -1 means use all.""")
        parser.add_argument("""--n_val""", type=int, default=500, required=False, help="""# examples. -1 means use all.""")
        parser.add_argument("""--n_test""", type=int, default=-1, required=False, help="""# examples. -1 means use all.""")
        parser.add_argument(
            """--task""", type=str, default="""summarization""", required=False, help="""# examples. -1 means use all.""")
        parser.add_argument("""--label_smoothing""", type=float, default=0.0, required=False)
        parser.add_argument("""--src_lang""", type=str, default="""""", required=False)
        parser.add_argument("""--tgt_lang""", type=str, default="""""", required=False)
        parser.add_argument("""--eval_beams""", type=int, default=None, required=False)
        parser.add_argument(
            """--val_metric""", type=str, default=None, required=False, choices=["""bleu""", """rouge2""", """loss""", None])
        parser.add_argument("""--eval_max_gen_length""", type=int, default=None, help="""never generate more than n tokens""")
        parser.add_argument("""--save_top_k""", type=int, default=1, required=False, help="""How many checkpoints to save""")
        parser.add_argument(
            """--early_stopping_patience""", type=int, default=-1, required=False, help=(
                """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
                """ val_check_interval will effect it."""
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    '''simple docstring'''

    mode = '''translation'''
    loss_names = ['''loss''']
    metric_names = ['''bleu''']
    default_val_metric = '''bleu'''

    def __init__(self, hparams, **kwargs):
        """simple docstring"""
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        """simple docstring"""
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("""/tmp""")
        or str(args.output_dir).startswith("""/var""")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("""WANDB_PROJECT""", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == """loss"""
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / """hparams.pkl""")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = """"""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, """*.ckpt"""), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
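# --- Illustrative sketch (not part of the original script) ---
# What `label_smoothed_nll_loss` (imported from utils above) computes, in a
# minimal form: blend the NLL of the gold token with the mean NLL over the
# vocabulary, weighted by the smoothing epsilon. Padding handling is omitted.
def _toy_label_smoothed_nll_loss(lprobs, target, epsilon):
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.mean(dim=-1)
    return ((1.0 - epsilon) * nll_loss + epsilon * smooth_loss).mean()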
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
UpperCamelCase__ : Dict = pl.Trainer.add_argparse_args(parser)
UpperCamelCase__ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCamelCase__ : List[str] = parser.parse_args()
main(args)
| 112
| 0
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rF"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(F"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
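# --- Illustrative usage (not part of the original file) ---
# token2json turns Donut-style tag sequences back into (possibly nested) JSON.
# For an instantiated processor, something like
#   processor.token2json("<s_menu><s_name>latte</s_name><s_price>5</s_price></s_menu>")
# would be expected to yield {"menu": {"name": "latte", "price": "5"}}
# (field names here are hypothetical; real tags come from the fine-tuned vocab).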
| 364
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = """dandelin/vilt-b32-finetuned-vqa"""
    description = (
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    name = """image_qa"""
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["""image""", """text"""]
    outputs = ["""text"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
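# --- Illustrative usage (not part of the original file) ---
# The tool runs encode -> forward -> decode. A sketch of the intended call,
# assuming the vision extras are installed and the checkpoint can be downloaded:
#
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=some_pil_image, question="What is on the table?")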
| 60
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    """simple docstring"""
    config = SwinConfig()
    name_split = swin_name.split('''_''')

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')

    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''

    if "head" in name:
        name = name.replace('''head''', '''classifier''')
    else:
        name = '''swin.''' + name

    return name
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """simple docstring"""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''', '''-''')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    timm_outs = timm_model(inputs['''pixel_values'''])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase_ : str = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
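# --- Illustrative usage (not part of the original script) ---
# Assuming the script is saved as convert_swin_timm_to_pytorch.py:
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224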
| 81
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32

    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        '''simple docstring'''
        return 8

    @property
    def dummy_tokenizer(self):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__snake_case : Optional[int] = PriorTransformer(**a_ )
return model
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__snake_case : List[Any] = ShapERenderer(**a_ )
return model
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.dummy_prior
__snake_case : str = self.dummy_text_encoder
__snake_case : str = self.dummy_tokenizer
__snake_case : Tuple = self.dummy_renderer
__snake_case : int = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=a_ , clip_sample=a_ , clip_sample_range=1.0 , )
__snake_case : Union[str, Any] = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ):
'''simple docstring'''
if str(a_ ).startswith('''mps''' ):
__snake_case : Tuple = torch.manual_seed(a_ )
else:
__snake_case : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case : Optional[int] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = '''cpu'''
__snake_case : str = self.get_dummy_components()
__snake_case : List[Any] = self.pipeline_class(**a_ )
__snake_case : str = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(a_ ) )
__snake_case : List[str] = output.images[0]
__snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = torch_device == '''cpu'''
__snake_case : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a_ , relax_max_difference=a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**a_ )
__snake_case : int = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = 1
__snake_case : List[Any] = 2
__snake_case : int = self.get_dummy_inputs(a_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Dict = batch_size * [inputs[key]]
__snake_case : Union[str, Any] = pipe(**a_ , num_images_per_prompt=a_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__snake_case : Optional[Any] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__snake_case : Optional[int] = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case : str = pipe(
'''a shark''' , generator=a_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a_ , a_ )
| 102
| 0
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_DESCRIPTION = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only works with the CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identify the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files was developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Parse and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if parse_col != "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 106
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 106
| 1
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Track the peak RSS memory of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measures():
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measures(start_measures):
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 107
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
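# Usage sketch (an illustration; the checkpoint is the one referenced in the map above):
#
#     tokenizer = BertGenerationTokenizer.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder"
#     )
#     ids = tokenizer("Hello world.").input_ids
#     tokens = tokenizer.convert_ids_to_tokens(ids)
#     text = tokenizer.convert_tokens_to_string(tokens)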
| 107
| 1
|
'''simple docstring'''
import argparse
import os
import re
lowerCamelCase = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCamelCase = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def _A ( _lowerCAmelCase , _lowerCAmelCase = False ):
"""simple docstring"""
with open(_lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
__lowercase =f.read()
__lowercase =content.split('\n' )
__lowercase =[]
__lowercase =0
while line_idx < len(_lowerCAmelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
__lowercase =len(re.search(r'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
__lowercase =[]
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
__lowercase =line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
__lowercase =sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : _re_identifier.search(_lowerCAmelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_lowerCAmelCase ) )
elif "\n".join(_lowerCAmelCase ) != content:
return True
def _A ( _lowerCAmelCase = False ):
"""simple docstring"""
__lowercase =[os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for f in os.listdir(_lowerCAmelCase ) if f.endswith('.py' )]
__lowercase =[sort_auto_mapping(_lowerCAmelCase , overwrite=_lowerCAmelCase ) for fname in fnames]
if not overwrite and any(_lowerCAmelCase ):
__lowercase =[f for f, d in zip(_lowerCAmelCase , _lowerCAmelCase ) if d]
raise ValueError(
f"""The following files have auto mappings that need sorting: {', '.join(_lowerCAmelCase )}. Run `make style` to fix"""
' this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 48
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS markers."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate completions for each task in the dataset, gathering results across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))

    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
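# Example launch (a sketch; the flags mirror fields of HumanEvalArguments that the
# code above reads, and the checkpoint name is only a placeholder):
#
#     accelerate launch human_eval.py \
#         --model_ckpt <model-checkpoint> \
#         --batch_size 10 --n_samples 200 \
#         --HF_ALLOW_CODE_EVAL "1"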
| 48
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__UpperCamelCase :Dict = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase , cache_dir=__lowercase)
__UpperCamelCase :int = [t[-1] for t in os.walk(os.path.join(__lowercase , os.listdir(__lowercase)[0] , '''snapshots'''))]
__UpperCamelCase :Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''') for f in files)
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase :Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__lowercase)
__UpperCamelCase :Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__UpperCamelCase :Optional[Any] = jax.random.PRNGKey(0)
__UpperCamelCase :str = 4
__UpperCamelCase :Union[str, Any] = jax.device_count()
__UpperCamelCase :Dict = num_samples * [prompt]
__UpperCamelCase :Optional[int] = pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
__UpperCamelCase :str = replicate(__lowercase)
__UpperCamelCase :Optional[Any] = jax.random.split(__lowercase , __lowercase)
__UpperCamelCase :Optional[Any] = shard(__lowercase)
__UpperCamelCase :Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
assert np.abs(np.abs(__lowercase , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
__UpperCamelCase :Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(__lowercase) == num_samples
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase , __UpperCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__lowercase)
__UpperCamelCase :List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__UpperCamelCase :List[Any] = jax.random.PRNGKey(0)
__UpperCamelCase :str = 50
__UpperCamelCase :Union[str, Any] = jax.device_count()
__UpperCamelCase :Any = num_samples * [prompt]
__UpperCamelCase :Dict = pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
__UpperCamelCase :List[str] = replicate(__lowercase)
__UpperCamelCase :List[Any] = jax.random.split(__lowercase , __lowercase)
__UpperCamelCase :str = shard(__lowercase)
__UpperCamelCase :List[str] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase)
__UpperCamelCase :Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__UpperCamelCase :str = jax.random.PRNGKey(0)
__UpperCamelCase :Dict = 50
__UpperCamelCase :Optional[Any] = jax.device_count()
__UpperCamelCase :Optional[int] = num_samples * [prompt]
__UpperCamelCase :Optional[int] = pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
__UpperCamelCase :List[str] = replicate(__lowercase)
__UpperCamelCase :Optional[Any] = jax.random.split(__lowercase , __lowercase)
__UpperCamelCase :Optional[int] = shard(__lowercase)
__UpperCamelCase :Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase , __UpperCamelCase :Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa)
__UpperCamelCase :List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__UpperCamelCase :List[Any] = jax.random.PRNGKey(0)
__UpperCamelCase :List[Any] = 50
__UpperCamelCase :Any = jax.device_count()
__UpperCamelCase :List[str] = num_samples * [prompt]
__UpperCamelCase :Tuple = pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
__UpperCamelCase :Any = replicate(__lowercase)
__UpperCamelCase :Optional[int] = jax.random.split(__lowercase , __lowercase)
__UpperCamelCase :Dict = shard(__lowercase)
__UpperCamelCase :Optional[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Tuple = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
__UpperCamelCase , __UpperCamelCase :Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__lowercase , safety_checker=__lowercase , )
__UpperCamelCase :str = scheduler.create_state()
__UpperCamelCase :Any = scheduler_state
__UpperCamelCase :Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__UpperCamelCase :Union[str, Any] = jax.random.PRNGKey(0)
__UpperCamelCase :Any = 50
__UpperCamelCase :str = jax.device_count()
__UpperCamelCase :Optional[int] = num_samples * [prompt]
__UpperCamelCase :List[str] = pipeline.prepare_inputs(__lowercase)
# shard inputs and rng
__UpperCamelCase :Optional[int] = replicate(__lowercase)
__UpperCamelCase :List[Any] = jax.random.split(__lowercase , __lowercase)
__UpperCamelCase :int = shard(__lowercase)
__UpperCamelCase :List[Any] = pipeline(__lowercase , __lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
assert np.abs((np.abs(__lowercase , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
__UpperCamelCase :Optional[int] = jax.device_count()
__UpperCamelCase :int = num_samples * [prompt]
__UpperCamelCase :Optional[Any] = jax.random.split(jax.random.PRNGKey(0) , __lowercase)
__UpperCamelCase , __UpperCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase , )
__UpperCamelCase :List[str] = replicate(__lowercase)
__UpperCamelCase :Optional[int] = pipeline.prepare_inputs(__lowercase)
__UpperCamelCase :List[Any] = shard(__lowercase)
__UpperCamelCase :str = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__UpperCamelCase :int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__UpperCamelCase , __UpperCamelCase :Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__lowercase , use_memory_efficient_attention=__lowercase , )
__UpperCamelCase :List[str] = replicate(__lowercase)
__UpperCamelCase :Dict = pipeline.prepare_inputs(__lowercase)
__UpperCamelCase :Union[str, Any] = shard(__lowercase)
__UpperCamelCase :str = pipeline(__lowercase , __lowercase , __lowercase , jit=__lowercase).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__UpperCamelCase :Union[str, Any] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
| 43
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Optional[int]:
super().setUp()
lowercase__ : List[Any] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
lowercase__ : Any = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase__ : Optional[Any] = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
lowercase__ : Dict = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
lowercase__ : str = '''adapt act apte'''
lowercase__ : Any = '''adapt act apte'''
return input_text, output_text
def _lowerCAmelCase( self ) -> str:
lowercase__ : Optional[int] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Tuple = '''adapt act apte'''
lowercase__ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
lowercase__ : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Any = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
lowercase__ : int = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
lowercase__ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
lowercase__ : str = '''I am a small frog.'''
lowercase__ : Union[str, Any] = tok([src_text] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )['''input_ids''']
lowercase__ : List[str] = tok.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
lowercase__ : Optional[Any] = '''I am a small frog .'''
lowercase__ : Any = '''.'''
lowercase__ : List[Any] = tok(__lowerCAmelCase )['''input_ids''']
lowercase__ : Optional[Any] = tok(__lowerCAmelCase )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 198
| 0
|
class FlowNetwork:
    """A capacitated directed graph with a single (possibly synthesized) source and sink."""

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    """Base class for algorithms operating on a FlowNetwork."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it in a subclass
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Base class for executors that compute a maximum flow."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Maximum flow via the push-relabel method with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
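    # Sanity check (an aside, not part of the original demo): the only path from
    # source 0 to sink 3 in the sample graph is 0 -> 1 -> 2 -> 3, so the maximum
    # flow is min(7, 6, 8) = 6 and the script should print 6.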
| 141
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 141
| 1
|
__snake_case : List[Any] = "Alexander Joslin"
import operator as op
from .stack import Stack
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : Optional[Any] = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
a_ : Dict = Stack()
a_ : str = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(a__))
elif i in operators:
# RULE 2
operator_stack.push(a__)
elif i == ")":
# RULE 4
a_ : Dict = operator_stack.peek()
operator_stack.pop()
a_ : str = operand_stack.peek()
operand_stack.pop()
a_ : List[Any] = operand_stack.peek()
operand_stack.pop()
a_ : Dict = operators[opr](a__ , a__)
operand_stack.push(a__)
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__snake_case : Dict = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 248
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowercase: Any = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , **lowerCamelCase_ ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(lowerCamelCase_ )
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generates binary segmentation masks for the given image."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers = 0, crop_overlap_ratio = 512 / 1500, points_per_crop = 32, crop_n_points_downscale_factor = 1, ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 227
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowercase__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ : Optional[Any] = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
def __init__( self : Tuple , lowercase_ : PriorTransformer , lowercase_ : CLIPVisionModel , lowercase_ : CLIPImageProcessor , lowercase_ : HeunDiscreteScheduler , lowercase_ : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=lowercase_ , image_encoder=lowercase_ , image_processor=lowercase_ , scheduler=lowercase_ , renderer=lowercase_ , )
def _snake_case ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
if latents is None:
snake_case_ : List[str] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case_ : List[str] = latents.to(lowercase_ )
snake_case_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def _snake_case ( self : Any , lowercase_ : Optional[int]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
snake_case_ : Dict = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
@property
def _snake_case ( self : Tuple ):
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _snake_case ( self : Dict , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Optional[int] , ):
if isinstance(lowercase_ , lowercase_ ) and isinstance(image[0] , torch.Tensor ):
snake_case_ : Any = torch.cat(lowercase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase_ , axis=0 )
if not isinstance(lowercase_ , torch.Tensor ):
snake_case_ : Union[str, Any] = self.image_processor(lowercase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
snake_case_ : int = image.to(dtype=self.image_encoder.dtype , device=lowercase_ )
snake_case_ : Any = self.image_encoder(lowercase_ )['''last_hidden_state''']
snake_case_ : List[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
snake_case_ : Optional[Any] = image_embeds.repeat_interleave(lowercase_ , dim=0 )
if do_classifier_free_guidance:
snake_case_ : Tuple = torch.zeros_like(lowercase_ )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : List[str] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self : Any , lowercase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowercase_ : int = 1 , lowercase_ : int = 25 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : float = 4.0 , lowercase_ : int = 64 , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ):
if isinstance(lowercase_ , PIL.Image.Image ):
snake_case_ : Union[str, Any] = 1
elif isinstance(lowercase_ , torch.Tensor ):
snake_case_ : Dict = image.shape[0]
elif isinstance(lowercase_ , lowercase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
snake_case_ : Dict = len(lowercase_ )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase_ )}" )
snake_case_ : str = self._execution_device
snake_case_ : Optional[int] = batch_size * num_images_per_prompt
snake_case_ : str = guidance_scale > 1.0
snake_case_ : Any = self._encode_image(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# prior
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
snake_case_ : str = self.scheduler.timesteps
snake_case_ : List[Any] = self.prior.config.num_embeddings
snake_case_ : Optional[int] = self.prior.config.embedding_dim
snake_case_ : Optional[int] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
snake_case_ : List[Any] = latents.reshape(latents.shape[0] , lowercase_ , lowercase_ )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : Dict = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
snake_case_ : Optional[Any] = self.prior(
lowercase_ , timestep=lowercase_ , proj_embedding=lowercase_ , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
snake_case_ : Dict = self.scheduler.step(
lowercase_ , timestep=lowercase_ , sample=lowercase_ , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase_ )
        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(image)
        images = torch.stack(images)
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
snake_case_ : Tuple = images.cpu().numpy()
if output_type == "pil":
snake_case_ : Dict = [self.numpy_to_pil(lowercase_ ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase_ )
| 350
|
"""simple docstring"""
def __lowercase ( _a ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
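# A few quick checks of the parity test above (values illustrative):
assert is_even(0) and is_even(2) and is_even(-4)
assert not is_even(1) and not is_even(-3)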
| 155
| 0
|
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
_lowercase : Tuple = input('Enter numbers separated by comma:\n').strip()
_lowercase : int = [int(item.strip()) for item in user_input.split(',')]
_lowercase : Optional[int] = int(input('Enter the number to be found in the list:\n').strip())
_lowercase : List[Any] = '' if binary_search(sequence, target) else 'not '
print(f"""{target} was {not_str}found in {sequence}""")
| 332
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a low-rank adapter; used only for testing."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
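# For reference, a minimal sketch of the 4-bit loading path these tests exercise
# (checkpoint name illustrative; requires a CUDA GPU and a recent bitsandbytes):
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
model_4bit = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", quantization_config=quantization_config, device_map="auto"
)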
| 325
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial() -> None:
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
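# Equivalent, as a sketch, with an sklearn Pipeline so the polynomial transform
# and the regressor stay coupled (degree mirrors the script above):
from sklearn.pipeline import make_pipeline

model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
model.fit(X, y)
model.predict([[5.5]])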
| 371
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        input_sentences = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        target_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_sentences)["input_ids"]
        self.assertListEqual(target_tokens, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, input_sentences)
def _SCREAMING_SNAKE_CASE ( self ,snake_case=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase : Dict = """This is a simple input"""
lowercase : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
lowercase : Dict = ("""This is a simple input""", """This is a pair""")
lowercase : Optional[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case ,max_length=snake_case )
tokenizer_r.encode_plus(snake_case ,max_length=snake_case )
tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case )
tokenizer_r.encode(snake_case ,max_length=snake_case )
tokenizer_r.batch_encode_plus(snake_case ,max_length=snake_case )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Simple input
self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Simple input
self.assertRaises(
snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,)
# Pair input
self.assertRaises(snake_case ,tokenizer_r.encode ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Pair input
self.assertRaises(snake_case ,tokenizer_r.encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" )
# Pair input
self.assertRaises(
snake_case ,tokenizer_r.batch_encode_plus ,snake_case ,max_length=snake_case ,padding="""max_length""" ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) ,1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) ,1 )
| 285
| 0
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__lowerCamelCase : int = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format). Relies on the module-level `from_gh` flag set in __main__."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
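# Example invocation, as a sketch (run id, token, and output path are placeholders):
#   python extract_warnings.py --workflow_run_id 1234567890 \
#       --output_dir ./warnings --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning,FutureWarning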
| 52
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = 'convnextv2'

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
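# A brief usage sketch (values illustrative, not a recommended setting):
config = ConvNextV2Config(depths=[3, 3, 9, 3], hidden_sizes=[96, 192, 384, 768])
# out_features picks which stages a backbone exposes:
backbone_config = ConvNextV2Config(out_features=["stage2", "stage4"])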
| 52
| 1
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
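# A small usage sketch of the greedy selection above (menu data illustrative):
food = ["Burger", "Pizza", "Coca Cola"]
value = [80, 100, 60]
weight = [40, 10, 10]
foods = build_menu(food, value, weight)
chosen, total_value = greedy(foods, 50.0, Things.get_value)
print(chosen, total_value)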
| 360
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_zero: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, **kwargs, ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps.")
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True, ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`")
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self: List[str] ):
return self.config.num_train_timesteps
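# A minimal usage sketch (shapes and step count illustrative): run the inverse
# scheduler over its timesteps with a random stand-in for a UNet prediction.
scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for a model's noise prediction
    sample = scheduler.step(model_output, t, sample).prev_sample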
| 158
| 0
|