| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE = 4 ) -> Dict:
snake_case_ = abs(UpperCamelCase__ ) or 4
return [[1 + x + y * row_size for x in range(UpperCamelCase__ )] for y in range(UpperCamelCase__ )]
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
return reverse_row(transpose(UpperCamelCase__ ) )
# OR.. transpose(reverse_column(matrix))
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return reverse_row(reverse_column(UpperCamelCase__ ) )
# OR.. reverse_column(reverse_row(matrix))
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
return reverse_column(transpose(UpperCamelCase__ ) )
# OR.. transpose(reverse_row(matrix))
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = [list(UpperCamelCase__ ) for x in zip(*UpperCamelCase__ )]
return matrix
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = matrix[::-1]
return matrix
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = [x[::-1] for x in matrix]
return matrix
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
for i in matrix:
print(*UpperCamelCase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__SCREAMING_SNAKE_CASE : Optional[Any] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__SCREAMING_SNAKE_CASE : List[str] = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
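    # Quick sanity checks on a 2x2 example (a minimal sketch using the helpers
    # above): 90 deg counterclockwise maps [[1, 2], [3, 4]] to [[2, 4], [1, 3]],
    # since transposing gives [[1, 3], [2, 4]] and reversing the row order
    # finishes the rotation.
    assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]
    assert rotate_180([[1, 2], [3, 4]]) == [[4, 3], [2, 1]]
    assert rotate_270([[1, 2], [3, 4]]) == [[3, 1], [4, 2]]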
| 347 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
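# Parsing sketch (hypothetical CoNLL-2003 style line, not taken from any real
# data file): NER.read_examples_from_file splits each line on spaces; column 0
# is the token and column label_idx (-1, i.e. the last) is the NER tag.
#
#     line = "Germany B-LOC\n"
#     splits = line.split(" ")        # ["Germany", "B-LOC\n"]
#     splits[0]                       # word  -> "Germany"
#     splits[-1].replace("\n", "")    # label -> "B-LOC"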
| 294 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase : Union[str, Any] = pytest.mark.integration
UpperCAmelCase : Union[str, Any] = {"comet"}
UpperCAmelCase : Tuple = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase : Optional[Any] = {"code_eval"}
UpperCAmelCase : Union[str, Any] = os.name == "nt"
UpperCAmelCase : int = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase : Any = importlib.util.find_spec("transformers") is not None
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Dict:
'''simple docstring'''
@wraps(UpperCamelCase__ )
def wrapper(self , __lowerCAmelCase ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
@wraps(UpperCamelCase__ )
def wrapper(self , __lowerCAmelCase ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase__ )
def wrapper(self , __lowerCAmelCase ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , UpperCamelCase__ )
return wrapper
def _SCREAMING_SNAKE_CASE () -> Dict:
'''simple docstring'''
lowercase_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
snake_case_ , snake_case_ , snake_case_ )
@local
class SCREAMING_SNAKE_CASE__ ( parameterized.TestCase ):
lowercase__ = {}
lowercase__ = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""")
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""")
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Any):
"""simple docstring"""
lowercase_ = """[...]"""
lowercase_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , UpperCAmelCase__)).module_path)
lowercase_ = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCAmelCase__)
# check parameters
lowercase_ = inspect.signature(metric._compute).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs
# run doctest
with self.patch_intensive_calls(UpperCAmelCase__ , metric_module.__name__):
with self.use_local_metrics():
try:
lowercase_ = doctest.testmod(UpperCAmelCase__ , verbose=UpperCAmelCase__ , raise_on_error=UpperCAmelCase__)
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@slow
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
lowercase_ = """[...]"""
lowercase_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , UpperCAmelCase__)).module_path)
# run doctest
with self.use_local_metrics():
lowercase_ = doctest.testmod(UpperCAmelCase__ , verbose=UpperCAmelCase__ , raise_on_error=UpperCAmelCase__)
self.assertEqual(results.failed , 0)
self.assertGreater(results.attempted , 1)
@contextmanager
def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase__):
yield
else:
yield
@contextmanager
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
def load_local_metric(lowerCAmelCase_ : Optional[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Tuple):
return load_metric(os.path.join("""metrics""" , UpperCAmelCase__) , *UpperCAmelCase__ , **UpperCAmelCase__)
with patch("""datasets.load_metric""") as mock_load_metric:
lowercase_ = load_local_metric
yield
@classmethod
def _UpperCAmelCase ( cls : Union[str, Any] , lowerCAmelCase_ : Tuple):
"""simple docstring"""
def wrapper(lowerCAmelCase_ : str):
lowercase_ = contextmanager(UpperCAmelCase__)
lowercase_ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : List[str]):
"""simple docstring"""
assert len(input_dict["""input_ids"""]) == 2
return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
lowercase_ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(__lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase__ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
lowercase_ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
def load_from_checkpoint(__lowerCAmelCase ):
class SCREAMING_SNAKE_CASE__ :
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
assert len(UpperCAmelCase__) == 2
lowercase_ = [0.19, 0.92]
return scores, sum(UpperCAmelCase__) / len(UpperCAmelCase__)
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
lowercase_ = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
lowercase_ = load_from_checkpoint
yield
def _SCREAMING_SNAKE_CASE () -> int:
'''simple docstring'''
lowercase_ = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
lowercase_ = """ERROR"""
lowercase_ = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(UpperCamelCase__ , match=re.escape(UpperCamelCase__ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase__ )
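# Usage sketch (hypothetical metric folder name): use_local_metrics() patches
# datasets.load_metric so that a metric name resolves to the local ./metrics
# tree instead of the Hub. Inside a LocalMetricTest method this looks like:
#
#     with self.use_local_metrics():
#         metric = datasets.load_metric("accuracy")  # loads ./metrics/accuracy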
| 136 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None:
_a : List[str] = claim_vector
_a : List[Any] = allocated_resources_table
_a : Union[str, Any] = maximum_claim_table
def _lowercase ( self : Tuple ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowercase ( self : int ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowercase ( self : List[str] ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]:
return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()}
def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None:
_a : List[Any] = self.__need()
_a : Optional[int] = self.__allocated_resources_table
_a : str = self.__available_resources()
_a : Optional[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
_a : int = False
for each_need in need_list:
_a : Optional[int] = True
for index, need in enumerate(UpperCAmelCase__ ):
if need > available_resources[index]:
_a : List[Any] = False
break
if execution:
_a : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_a : Any = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(UpperCAmelCase__ )
# update available/freed resources stack
_a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _lowercase ( self : Any ) -> Optional[int]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
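    # Example run on the module-level test tables above: with describe=True the
    # resource tables are printed first, followed by a safe execution order (if
    # one exists) and the updated available-resource vector after each step.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)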
| 294 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-Excite runs a backward pass at inference time, for which no
    # deterministic backward operator exists, so determinism is disabled here.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
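# Note on token_indices (a sketch of the inputs above, assuming each word maps
# to a single CLIP BPE token): the indices select the prompt tokens whose
# cross-attention maps Attend-and-Excite maximizes. CLIP tokenization prepends
# a start-of-text token, so for the slow test's prompt:
#
#     tokens = tokenizer("a painting of an elephant with glasses").input_ids
#     # index:  0      1  2         3   4   5         6     7
#     #         <bos>  a  painting  of  an  elephant  with  glasses
#
# which makes token_indices=[5, 7] point at "elephant" and "glasses".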
| 111 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_snake_case = TypeVar('_T')
class UpperCamelCase ( Generic[_T] ):
def __init__( self : Optional[int] , UpperCAmelCase__ : Iterable[_T] | None = None ) -> None:
_a : list[_T] = list(iterable or [] )
_a : list[_T] = []
def __len__( self : str ) -> int:
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[str] ) -> str:
return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : _T ) -> None:
self._stacka.append(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) -> _T:
_a : Any = self._stacka.pop
_a : Union[str, Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
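    # Usage sketch: FIFO behaviour backed by two LIFO stacks. Each element is
    # moved between the stacks at most once, so put/get are amortized O(1).
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    assert queue.get() == 1
    assert queue.get() == 2
    assert len(queue) == 2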
| 294 | 0 |
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
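# Usage sketch (hypothetical model test; BertConfig stands in for any config
# class): a model's test suite instantiates the tester with its config class
# plus config kwargs, then runs every check in one call.
#
#     class BertConfigTest(unittest.TestCase):
#         def test_config(self):
#             tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#             tester.run_common_tests()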
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_snake_case = False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> List[Any]:
_a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Optional[Any] = torch.manual_seed(0 )
_a : Union[str, Any] = pipe.dual_guided(
prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
_a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[Any] = generator.manual_seed(0 )
_a : str = pipe.dual_guided(
prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = """cyberpunk 2077"""
_a : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Tuple = torch.manual_seed(0 )
_a : Any = pipe.dual_guided(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_a : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_a : int = """A painting of a squirrel eating a burger """
_a : Tuple = torch.manual_seed(0 )
_a : Union[str, Any] = pipe.text_to_image(
prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
_a : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 294 | 0 |
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(key, value, full_name, weight_type, hf_pointer):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
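# Example invocation (script name and all paths are placeholders): converting a
# pretrained-only fairseq checkpoint needs no dictionary, so --not_finetuned is
# set and --dict_path is omitted.
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path ./wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base \
#         --not_finetuned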
| 65 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'vocab.json'}
_snake_case = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_snake_case = {'mgp-str': 27}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]="[GO]" , UpperCAmelCase__ : Tuple="[GO]" , UpperCAmelCase__ : Optional[int]="[s]" , UpperCAmelCase__ : int="[GO]" , **UpperCAmelCase__ : Dict ) -> int:
super().__init__(
unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
_a : int = json.load(UpperCAmelCase__ )
_a : Optional[int] = {v: k for k, v in self.vocab.items()}
@property
def _lowercase ( self : Dict ) -> Union[str, Any]:
return len(self.vocab )
def _lowercase ( self : Union[str, Any] ) -> str:
return dict(self.vocab , **self.added_tokens_encoder )
def _lowercase ( self : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]:
_a : Tuple = []
for s in text:
char_tokens.extend(UpperCAmelCase__ )
return char_tokens
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> Dict:
return self.vocab.get(UpperCAmelCase__ , self.vocab.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]:
return self.decoder.get(UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCAmelCase__ ) )
return
_a : Tuple = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + """\n""" )
return (vocab_file,)
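# Tokenization sketch (hypothetical vocab.json contents): _tokenize splits a
# string into single characters before each one is looked up in the vocab,
# falling back to the [GO] unk token for unknown characters.
#
#     tokenizer = MgpstrTokenizer(vocab_file="vocab.json")  # e.g. {"a": 0, "b": 1, ...}
#     tokenizer._tokenize("ab")  # -> ["a", "b"]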
| 294 | 0 |
"""Flax mT5 model: thin wrappers that reuse the Flax T5 modules with an mT5 config."""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
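# Worked example of shift_tokens_right (decoder input preparation): the decoder
# input is the target sequence shifted one position right, starting with
# decoder_start_token_id; any -100 label-padding value is replaced by
# pad_token_id.
#
#     shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=1, decoder_start_token_id=0)
#     # -> [[0, 5, 6]]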
| 139 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = (IPNDMScheduler,)
UpperCamelCase : int = (('''num_inference_steps''', 50),)
def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int:
_a : Optional[int] = {"""num_train_timesteps""": 1000}
config.update(**UpperCAmelCase__ )
return config
def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
_a : Optional[int] = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : Union[str, Any] = 0.1 * sample
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : Tuple ) -> List[str]:
pass
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
_a : Optional[Any] = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : List[Any] = 0.1 * sample
_a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
_a : int = 10
_a : List[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_a : str = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def _lowercase ( self : int ) -> str:
_a : Dict = dict(self.forward_default_kwargs )
_a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**UpperCAmelCase__ )
_a : Tuple = self.dummy_sample
_a : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ):
_a : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_a : Optional[Any] = dummy_past_residuals[:]
_a : Optional[Any] = scheduler.timesteps[5]
_a : str = scheduler.timesteps[6]
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : List[str] ) -> List[str]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> List[str]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[Any]:
_a : str = self.full_loop()
_a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 294 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a_ = logging.get_logger(__name__)
a_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a_ = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BlenderbotSmallTokenizer
def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
self.add_prefix_space = add_prefix_space
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
'''simple docstring'''
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 340 |
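A hedged usage sketch for the fast tokenizer above; it assumes the class is exported from transformers under this name, and the checkpoint id comes from the pretrained maps in the snippet:

from transformers import BlenderbotSmallTokenizerFast

tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("hello world")["input_ids"]   # encode to token ids
print(tokenizer.decode(ids))                  # round-trip back to text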
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer( PreTrainedTokenizer ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None:
_a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_a : Optional[Any] = 3
_a : Tuple = do_lower_case
_a : Tuple = remove_space
_a : Tuple = keep_accents
_a : Tuple = vocab_file
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_a : int = jieba
_a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowercase ( self : Optional[Any] ) -> Any:
return len(self.sp_model )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
_a : Tuple = self.__dict__.copy()
_a : Tuple = None
return state
def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
if self.remove_space:
_a : Optional[int] = """ """.join(inputs.strip().split() )
else:
_a : List[Any] = inputs
_a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ )
_a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
_a : Union[str, Any] = outputs.lower()
return outputs
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
_a : str = self.preprocess_text(UpperCAmelCase__ )
_a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
_a : Union[str, Any] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Dict = cur_pieces[1:]
else:
_a : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int:
return self.sp_model.PieceToId(UpperCAmelCase__ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict:
_a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip()
return out_string
def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return token_ids_0 + sep + cls
return token_ids_0 + sep + token_ids_1 + sep + cls
def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
if token_ids_1 is not None:
return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
return ([0] * len(token_ids_0 )) + [1, 1]
def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
sep = [self.sep_token_id]
cls_segment_id = [2]
if token_ids_1 is None:
return len(token_ids_0 + sep ) * [0] + cls_segment_id
return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Union[str, Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , """wb""" ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]:
_a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 294 | 0 |
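The preprocessing method above chains four normalization rules. A self-contained sketch of the same rules, usable without SentencePiece:

import unicodedata

def preprocess_text(text, remove_space=True, keep_accents=False, do_lower_case=False):
    out = " ".join(text.strip().split()) if remove_space else text   # collapse whitespace
    out = out.replace("``", '"').replace("''", '"')                  # normalize quote pairs
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)                     # split accents off base chars
        out = "".join(c for c in out if not unicodedata.combining(c))
    return out.lower() if do_lower_case else out

print(preprocess_text("  ``Héllo''   world  "))  # "Hello" world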
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__A = logging.get_logger(__name__)
# General docstring
__A = "MobileNetV1Config"
# Base docstring
__A = "google/mobilenet_v1_1.0_224"
__A = [1, 1_024, 7, 7]
# Image classification docstring
__A = "google/mobilenet_v1_1.0_224"
__A = "tabby, tabby cat"
__A = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model : Union[str, Any] , config : Optional[Any] , tf_weights : Optional[int]=None ):
tf_to_pt_map = {}
if isinstance(model , MobileNetVaForImageClassification ):
backbone = model.mobilenet_va
else:
backbone = model
snake_case : int = """MobilenetV1/Conv2d_0/"""
snake_case : Any = backbone.conv_stem.convolution.weight
snake_case : str = backbone.conv_stem.normalization.bias
snake_case : Any = backbone.conv_stem.normalization.weight
snake_case : Any = backbone.conv_stem.normalization.running_mean
snake_case : int = backbone.conv_stem.normalization.running_var
for i in range(13 ):
snake_case : int = i + 1
snake_case : Optional[int] = i * 2
snake_case : Optional[Any] = backbone.layer[pt_index]
snake_case : List[str] = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
snake_case : Any = pointer.convolution.weight
snake_case : Dict = pointer.normalization.bias
snake_case : Tuple = pointer.normalization.weight
snake_case : Optional[Any] = pointer.normalization.running_mean
snake_case : int = pointer.normalization.running_var
snake_case : Optional[int] = backbone.layer[pt_index + 1]
snake_case : Optional[Any] = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
snake_case : Optional[int] = pointer.convolution.weight
snake_case : str = pointer.normalization.bias
snake_case : Optional[Any] = pointer.normalization.weight
snake_case : Any = pointer.normalization.running_mean
snake_case : Union[str, Any] = pointer.normalization.running_var
if isinstance(model , MobileNetVaForImageClassification ):
snake_case : Optional[int] = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
snake_case : Any = model.classifier.weight
snake_case : Union[str, Any] = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model : Dict , config : Optional[Any] , tf_checkpoint_path : Optional[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_checkpoint_path )
tf_weights = {}
for name, shape in init_vars:
logger.info(F'''Loading TF weight {name} with shape {shape}''' )
array = tf.train.load_variable(tf_checkpoint_path , name )
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
for name, pointer in tf_to_pt_map.items():
logger.info(F'''Importing {name}''' )
if name not in tf_weights:
logger.info(F'''{name} not in tf pre-trained weights, skipping''' )
continue
array = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
array = np.transpose(array , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
array = array.squeeze().transpose()
else:
array = np.transpose(array , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' )
logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' )
pointer.data = torch.from_numpy(array )
tf_weights.pop(name , None )
tf_weights.pop(name + "/RMSProp" , None )
tf_weights.pop(name + "/RMSProp_1" , None )
tf_weights.pop(name + "/ExponentialMovingAverage" , None )
logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' )
return model
def apply_tf_padding( features : str , conv_layer : List[Any] ):
in_height , in_width = features.shape[-2:]
stride_height , stride_width = conv_layer.stride
kernel_height , kernel_width = conv_layer.kernel_size
if in_height % stride_height == 0:
pad_along_height = max(kernel_height - stride_height , 0 )
else:
pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
pad_along_width = max(kernel_width - stride_width , 0 )
else:
pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
padding = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(features , padding , "constant" , 0.0 )
class MobileNetVaConvLayer( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
super().__init__()
snake_case : Any = config
if in_channels % groups != 0:
raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
if out_channels % groups != 0:
raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
snake_case : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
snake_case : List[Any] = nn.Conv2d(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , kernel_size=UpperCAmelCase__ , stride=UpperCAmelCase__ , padding=UpperCAmelCase__ , groups=UpperCAmelCase__ , bias=UpperCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
snake_case : int = nn.BatchNorm2d(
num_features=UpperCAmelCase__ , eps=config.layer_norm_eps , momentum=0.99_97 , affine=UpperCAmelCase__ , track_running_stats=UpperCAmelCase__ , )
else:
snake_case : int = None
if use_activation:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase__ ):
snake_case : int = ACTaFN[config.hidden_act]
else:
snake_case : str = config.hidden_act
else:
snake_case : Union[str, Any] = None
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.config.tf_padding:
snake_case : List[str] = apply_tf_padding(UpperCAmelCase__ , self.convolution )
snake_case : List[str] = self.convolution(UpperCAmelCase__ )
if self.normalization is not None:
snake_case : Optional[int] = self.normalization(UpperCAmelCase__ )
if self.activation is not None:
snake_case : Dict = self.activation(UpperCAmelCase__ )
return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
config_class = MobileNetVaConfig
load_tf_weights = load_tf_weights_in_mobilenet_va
base_model_prefix = '''mobilenet_v1'''
main_input_name = '''pixel_values'''
supports_gradient_checkpointing = False
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(UpperCAmelCase__ , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase__ , nn.BatchNorm2d ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__A = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , snake_case_ , )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True ):
"""simple docstring"""
super().__init__(UpperCAmelCase__ )
snake_case : List[Any] = config
snake_case : Any = 32
snake_case : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
snake_case : Optional[Any] = MobileNetVaConvLayer(
UpperCAmelCase__ , in_channels=config.num_channels , out_channels=UpperCAmelCase__ , kernel_size=3 , stride=2 , )
snake_case : int = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
snake_case : List[Any] = nn.ModuleList()
for i in range(13 ):
snake_case : List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
snake_case : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase__ , in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase__ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase__ , in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , kernel_size=1 , ) )
snake_case : Dict = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
snake_case : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
snake_case : Tuple = self.conv_stem(UpperCAmelCase__ )
snake_case : Tuple = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
snake_case : Optional[int] = layer_module(UpperCAmelCase__ )
if output_hidden_states:
snake_case : int = all_hidden_states + (hidden_states,)
snake_case : int = hidden_states
if self.pooler is not None:
snake_case : Tuple = torch.flatten(self.pooler(UpperCAmelCase__ ) , start_dim=1 )
else:
snake_case : str = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase__ , pooler_output=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel ):
def __init__( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(UpperCAmelCase__ )
snake_case : int = config.num_labels
snake_case : List[Any] = MobileNetVaModel(UpperCAmelCase__ )
snake_case : Union[str, Any] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
snake_case : Optional[int] = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase__ )
snake_case : Tuple = nn.Linear(UpperCAmelCase__ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ):
"""simple docstring"""
snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case : Any = self.mobilenet_va(UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , return_dict=UpperCAmelCase__ )
snake_case : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
snake_case : List[Any] = self.classifier(self.dropout(UpperCAmelCase__ ) )
snake_case : List[Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case : str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case : str = """single_label_classification"""
else:
snake_case : Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
snake_case : Optional[Any] = MSELoss()
if self.num_labels == 1:
snake_case : Optional[int] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case : Optional[int] = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
snake_case : Optional[Any] = CrossEntropyLoss()
snake_case : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case : Any = BCEWithLogitsLoss()
snake_case : Union[str, Any] = loss_fct(UpperCAmelCase__ , UpperCAmelCase__ )
if not return_dict:
snake_case : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase__ , logits=UpperCAmelCase__ , hidden_states=outputs.hidden_states , )
| 148 |
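The apply_tf_padding helper above reproduces TensorFlow's "SAME" padding. A simplified standalone sketch for square kernels and strides, returning pads in the (left, right, top, bottom) order the snippet uses:

def same_padding(in_h, in_w, stride, kernel):
    # pad so output size equals ceil(input / stride), as in TF "SAME" mode
    pad_h = max(kernel - stride, 0) if in_h % stride == 0 else max(kernel - in_h % stride, 0)
    pad_w = max(kernel - stride, 0) if in_w % stride == 0 else max(kernel - in_w % stride, 0)
    return (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)

print(same_padding(7, 7, 2, 3))  # (1, 1, 1, 1)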
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
def __init__( self : Union[str, Any] ) -> None:
super().__init__()
self.lineara = nn.Linear(3 , 4 )
self.batchnorm = nn.BatchNorm1d(4 )
self.linearb = nn.Linear(4 , 5 )
def forward( self , x ) -> int:
return self.linearb(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook( ModelHook ):
def pre_forward( self , module , *args , **kwargs ):
return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
def post_forward( self , module , output ):
return output + 1
class HooksModelTester( unittest.TestCase ):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
test_model._hf_hook.no_grad = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : int = torch.randn(2 , 3 )
_a : str = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_a : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Tuple = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Tuple ) -> List[str]:
_a : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : List[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : List[str] = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Any = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 294 | 0 |
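A short usage sketch of the accelerate hook API the tests above cover; AddOnePreHook is a hypothetical hook defined here for illustration:

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # shift the first positional input by one before the wrapped forward runs
        return (args[0] + 1,) + args[1:], kwargs

layer = nn.Linear(3, 4)
x = torch.randn(2, 3)
add_hook_to_module(layer, AddOnePreHook())  # wraps layer.forward in place
y_hooked = layer(x)                         # actually computes layer(x + 1)
remove_hook_from_module(layer)              # restores the original forward
assert torch.allclose(y_hooked, layer(x + 1), atol=1e-5)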
"""simple docstring"""
def solution():
"""simple docstring"""
days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
day = 6
month = 1
year = 19_01
sundays = 0
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
day = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
day = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
day = day - days_per_month[month - 2]
if month > 12:
year += 1
month = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution()) | 320 |
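The iterative solution above can be cross-checked against the standard library; this sketch counts the same first-of-month Sundays with calendar:

import calendar

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if calendar.weekday(year, month, 1) == calendar.SUNDAY
)
print(count)  # 171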
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(distance ):
print(F"""{i}\t\t{d}""" )
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
'''simple docstring'''
for j in range(edge_count ):
u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
'''simple docstring'''
distance = [float("""inf""" )] * vertex_count
distance[src] = 0.0
for _ in range(vertex_count - 1 ):
for j in range(edge_count ):
u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
distance[v] = distance[u] + w
negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
V = int(input('Enter number of vertices: ').strip())
E = int(input('Enter number of edges: ').strip())
graph = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
src, dest, weight = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
graph[i] = {'src': src, 'dst': dest, 'weight': weight}
source = int(input('\nEnter shortest path source:').strip())
shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 294 | 0 |
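A tiny worked example for the bellman_ford function above, using the same {"src", "dst", "weight"} edge format as its __main__ block:

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
    {"src": 1, "dst": 3, "weight": 1},
]
print(bellman_ford(edges, 4, len(edges), 0))  # [0.0, 3.0, 1.0, 4.0]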
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
def __init__( self , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
'''simple docstring'''
self.__claim_vector = claim_vector
self.__allocated_resources_table = allocated_resources_table
self.__maximum_claim_table = maximum_claim_table
def __processes_resource_summation( self ) -> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __available_resources( self ) -> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __need( self ) -> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __need_index_manager( self ) -> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(i ): i for i in self.__need()}
def main( self , **kwargs : Optional[Any] ) -> None:
'''simple docstring'''
need_list = self.__need()
alloc_resources_table = self.__allocated_resources_table
available_resources = self.__available_resources()
need_index_manager = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
safe = False
for each_need in need_list:
execution = True
for index, need in enumerate(each_need ):
if need > available_resources[index]:
execution = False
break
if execution:
safe = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
process_number = original_need_index
print(F"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(each_need )
# update available/freed resources stack
available_resources = np.array(available_resources ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(x ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __pretty_data( self ) -> None:
'''simple docstring'''
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(item ) + 1}"""
+ " ".join(F"""{it:>8}""" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(item ) + 1}"""
+ " ".join(F"""{it:>8}""" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(x ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 247 |
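A hedged usage sketch for the banker's algorithm class above, reusing the module-level test tables; any truthy keyword (describe=True here) switches on the pretty-printed report:

BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)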
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
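The module above relies on lazy imports. A minimal standalone sketch of the pattern, simplified relative to transformers' actual _LazyModule:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the owning submodule on first access, then fetch the attribute
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)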
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
SCREAMING_SNAKE_CASE :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , unet , scheduler ) -> None:
"""simple docstring"""
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self , batch_size : int = 1 , num_inference_steps : int = 1_0_0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if audio_length_in_s is None:
audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
sample_size = audio_length_in_s * self.unet.config.sample_rate
down_scale_factor = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
original_sample_size = int(sample_size )
if sample_size % down_scale_factor != 0:
snake_case_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
" process." )
sample_size = int(sample_size )
dtype = next(iter(self.unet.parameters() ) ).dtype
shape = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
snake_case_ = self.scheduler.timesteps.to(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# 2. compute previous image: x_t -> t_t-1
snake_case_ = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
snake_case_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
snake_case_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=UpperCAmelCase__ )
| 159 |
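A hedged usage sketch for the audio pipeline above; "harmonai/maestro-150k" is an assumed DanceDiffusion checkpoint name:

from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
audio = output.audios[0]  # numpy array, shape (channels, samples)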
"""simple docstring"""
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
'''simple docstring'''
parameters = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("""All input parameters must be positive""" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("""Relative densities cannot be greater than one""" )
else:
curvature = 1 - (matter_density + radiation_density + dark_energy)
e_2 = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
hubble = hubble_constant * e_2 ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 294 | 0 |
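A quick worked check of hubble_parameter above: at z = 0 with relative densities summing to one, the curvature term vanishes and H(0) reduces to H0:

print(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    )
)  # 68.3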
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__SCREAMING_SNAKE_CASE : Optional[int] = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
__SCREAMING_SNAKE_CASE : str = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def convert_state_dict( state_dict ) -> Any:
state_dict_keys = list(state_dict.keys() )
for name in state_dict_keys:
weight = state_dict.pop(name )
# emb -> embedding
if name.startswith("""emb.""" ):
name = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
name = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
name = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , name )
# ffn -> feed_forward
name = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , name )
# time_mix_k -> time_mix_key
if name.endswith(""".time_mix_k""" ):
name = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value
if name.endswith(""".time_mix_v""" ):
name = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_receptance
if name.endswith(""".time_mix_r""" ):
name = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
name = """rwkv.""" + name
state_dict[name] = weight
return state_dict
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ) -> Optional[int]:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
vocab_size = 50_277
tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
vocab_size = len(tokenizer )
tokenizer.save_pretrained(output_dir )
# 2. Build the config
possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
size = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
config = RwkvConfig(
vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(output_dir )
# 3. Download model file then convert state_dict
model_file = hf_hub_download(repo_id , checkpoint_file )
state_dict = torch.load(model_file , map_location="""cpu""" )
state_dict = convert_state_dict(state_dict )
# 4. Split in shards and save
shards , index = shard_checkpoint(state_dict )
for shard_file, shard in shards.items():
torch.save(shard , os.path.join(output_dir , shard_file ) )
if index is not None:
save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
# Save the index as well
with open(save_index_file , """w""" , encoding="""utf-8""" ) as f:
content = json.dumps(index , indent=2 , sort_keys=True ) + """\n"""
f.write(content )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"""Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.""" )
snake_case_ = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
state_dict = torch.load(os.path.join(output_dir , shard_file ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
model = AutoModelForCausalLM.from_pretrained(output_dir )
model.push_to_hub(model_name , max_shard_size="""2GB""" )
tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
__SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 347 |
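A standalone check of the renaming rules in convert_state_dict above, applied to one hypothetical checkpoint key:

import re

name = "blocks.3.att.time_mix_k"
name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)  # att -> attention
name = name.replace(".time_mix_k", ".time_mix_key")                 # time_mix_k -> time_mix_key
print(name)  # blocks.3.attention.time_mix_key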
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[Any] ) -> Dict:
_a : Optional[int] = tempfile.mkdtemp()
_a : Optional[Any] = SamImageProcessor()
_a : int = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : str ) -> int:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Tuple ) -> Dict:
_a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Dict ) -> Dict:
_a : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
_a : Optional[Any] = self.get_image_processor()
_a : int = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Union[str, Any] = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Tuple = [torch.ones((1, 3, 5, 5) )]
_a : Tuple = [[1764, 2646]]
_a : Optional[int] = [[683, 1024]]
_a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : int = processor.post_process_masks(
UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_a : Optional[Any] = [np.ones((1, 3, 5, 5) )]
_a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase__ ):
_a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) )
@require_vision
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Any ) -> List[str]:
_a : List[str] = tempfile.mkdtemp()
_a : Any = SamImageProcessor()
_a : Union[str, Any] = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Dict ) -> List[str]:
_a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
_a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> str:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : int = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def _lowercase ( self : Optional[Any] ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Any = [tf.ones((1, 3, 5, 5) )]
_a : Tuple = [[1764, 2646]]
_a : str = [[683, 1024]]
_a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : Union[str, Any] = processor.post_process_masks(
UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_a : List[Any] = [np.ones((1, 3, 5, 5) )]
_a : Optional[int] = processor.post_process_masks(
UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : Dict = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_a : List[Any] = processor.post_process_masks(
UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : str ) -> Optional[Any]:
_a : Optional[Any] = tempfile.mkdtemp()
_a : Dict = SamImageProcessor()
_a : List[str] = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : Tuple ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : str ) -> int:
_a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _lowercase ( self : int ) -> List[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_a : str = [tf.convert_to_tensor(UpperCAmelCase__ )]
_a : Optional[int] = [torch.tensor(UpperCAmelCase__ )]
_a : Union[str, Any] = [[1764, 2646]]
_a : List[str] = [[683, 1024]]
_a : Optional[int] = processor.post_process_masks(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" )
_a : List[str] = processor.post_process_masks(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _lowercase ( self : str ) -> Optional[Any]:
_a : List[Any] = self.get_image_processor()
_a : Any = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Dict = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
_a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
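# Hedged sketch of the core of the mask post-processing exercised above: a
# low-resolution mask is upsampled back to the original image size, which is
# why a (1, 3, 5, 5) input comes out as (1, 3, 1764, 2646). SAM's real
# `post_process_masks` also handles the intermediate reshaped size and
# padding; this shows only the final interpolation step.
import torch
import torch.nn.functional as F

low_res_mask = torch.ones((1, 3, 5, 5))
original_size = (1764, 2646)
upscaled = F.interpolate(low_res_mask, size=original_size, mode="bilinear", align_corners=False)
assert tuple(upscaled.shape) == (1, 3, 1764, 2646)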
| 294 | 0 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase : Union[str, Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase : List[str] = [0, 25, 50]
UpperCAmelCase : List[str] = [25, 50, 75]
UpperCAmelCase : Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase : int = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase : str = np.ones(75)
UpperCAmelCase : Dict = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase : List[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A') = (1 - µA(x))
UpperCAmelCase : Any = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
UpperCAmelCase : Dict = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
UpperCAmelCase : Union[str, Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase : Tuple = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
UpperCAmelCase : List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
UpperCAmelCase : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
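# A NumPy-only cross-check of the standard fuzzy-set operations computed
# above with skfuzzy (illustrative; `mu_a`/`mu_b` stand in for the `young`
# and `middle_aged` membership arrays).
import numpy as np

def fuzzy_ops(mu_a: np.ndarray, mu_b: np.ndarray) -> dict:
    return {
        "union": np.maximum(mu_a, mu_b),             # max(uA, uB)
        "intersection": np.minimum(mu_a, mu_b),      # min(uA, uB)
        "complement_a": 1.0 - mu_a,                  # 1 - uA
        "difference": np.minimum(mu_a, 1.0 - mu_b),  # min(uA, 1 - uB)
        "alg_sum": mu_a + mu_b - mu_a * mu_b,
        "alg_product": mu_a * mu_b,
        "bounded_sum": np.minimum(1.0, mu_a + mu_b),
        "bounded_difference": np.maximum(0.0, mu_a - mu_b),
    }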
| 136 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_snake_case = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
_snake_case = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : int = list(state_dict.keys() )
for name in state_dict_keys:
_a : str = state_dict.pop(UpperCamelCase__ )
# emb -> embedding
if name.startswith("""emb.""" ):
_a : Dict = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
_a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
_a : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase__ )
# ffn -> feed_forward
_a : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase__ )
# time_mix_k -> time_mix_key
if name.endswith(""".time_mix_k""" ):
_a : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value
if name.endswith(""".time_mix_v""" ):
_a : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_receptance
if name.endswith(""".time_mix_r""" ):
_a : Dict = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
_a : Optional[int] = """rwkv.""" + name
_a : Any = weight
return state_dict
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=None ):
'''simple docstring'''
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
_a : Tuple = 5_0_2_7_7
_a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
_a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ )
_a : int = len(UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
# 2. Build the config
_a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : Tuple = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : List[Any] = RwkvConfig(
vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(UpperCamelCase__ )
# 3. Download model file then convert state_dict
_a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ )
_a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" )
_a : List[str] = convert_state_dict(UpperCamelCase__ )
# 4. Split in shards and save
_a , _a : List[str] = shard_checkpoint(UpperCamelCase__ )
for shard_file, shard in shards.items():
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
if index is not None:
_a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
# Save the index as well
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
_a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n"""
f.write(UpperCamelCase__ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"""Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model.""" )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
_a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" )
tokenizer.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
_snake_case = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
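# Hedged walk-through of the two regex renames used in convert_state_dict
# above, showing the block index captured by (\d+) surviving the rewrite.
import re

name = "blocks.3.att.key.weight"
name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
assert name == "blocks.3.attention.key.weight"

name = "blocks.7.ffn.value.weight"
name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
assert name == "blocks.7.feed_forward.value.weight"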
| 294 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
__snake_case: Optional[Any] = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2]
__snake_case: Any = True if """large""" in model_name or """huge""" in model_name else False
__snake_case: Union[str, Any] = True if """large""" in model_name or """huge""" in model_name else False
__snake_case: Any = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__snake_case: Union[str, Any] = [3, 3, 3, 3]
__snake_case: Dict = [5, 5, 5, 5]
elif "fl4" in model_name:
__snake_case: int = [4, 4, 4, 4]
__snake_case: Tuple = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__snake_case: Tuple = [3, 3, 3, 3]
if "lrf" in model_name:
__snake_case: List[str] = [3, 3, 3, 3]
else:
__snake_case: List[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
__snake_case: int = 96
elif "small" in model_name:
__snake_case: List[str] = 96
elif "base" in model_name:
__snake_case: Dict = 128
elif "large" in model_name:
__snake_case: Optional[int] = 192
elif "xlarge" in model_name:
__snake_case: List[Any] = 256
elif "huge" in model_name:
__snake_case: Optional[int] = 352
# set label information
__snake_case: Any = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
__snake_case: str = """imagenet-22k-id2label.json"""
else:
__snake_case: Optional[Any] = """imagenet-1k-id2label.json"""
__snake_case: Tuple = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""") , """r"""))
__snake_case: Dict = {int(UpperCamelCase__): v for k, v in idalabel.items()}
__snake_case: Optional[int] = {v: k for k, v in idalabel.items()}
__snake_case: List[str] = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
if "patch_embed.proj" in name:
__snake_case: List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""")
if "patch_embed.norm" in name:
__snake_case: Optional[int] = name.replace("""patch_embed.norm""" , """embeddings.norm""")
if "layers" in name:
__snake_case: str = """encoder.""" + name
if "encoder.layers" in name:
__snake_case: Any = name.replace("""encoder.layers""" , """encoder.stages""")
if "downsample.proj" in name:
__snake_case: Any = name.replace("""downsample.proj""" , """downsample.projection""")
if "blocks" in name:
__snake_case: List[str] = name.replace("""blocks""" , """layers""")
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__snake_case: Tuple = name.replace("""modulation.f""" , """modulation.projection_in""")
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__snake_case: List[str] = name.replace("""modulation.h""" , """modulation.projection_context""")
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__snake_case: Optional[Any] = name.replace("""modulation.proj""" , """modulation.projection_out""")
if name == "norm.weight":
__snake_case: Dict = """layernorm.weight"""
if name == "norm.bias":
__snake_case: int = """layernorm.bias"""
if "head" in name:
__snake_case: Union[str, Any] = name.replace("""head""" , """classifier""")
else:
__snake_case: Optional[Any] = """focalnet.""" + name
return name
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False) -> List[Any]:
__snake_case: Dict = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
__snake_case: Optional[Any] = model_name_to_url[model_name]
print("""Checkpoint URL: """ , UpperCamelCase__)
__snake_case: List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""")["""model"""]
# rename keys
for key in state_dict.copy().keys():
__snake_case: List[str] = state_dict.pop(UpperCamelCase__)
__snake_case: Any = val
__snake_case: Union[str, Any] = get_focalnet_config(UpperCamelCase__)
__snake_case: List[Any] = FocalNetForImageClassification(UpperCamelCase__)
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__)
# verify conversion
__snake_case: Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case: Optional[Any] = BitImageProcessor(
do_resize=UpperCamelCase__ , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=224 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__snake_case: Optional[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__).raw)
__snake_case: Union[str, Any] = processor(images=UpperCamelCase__ , return_tensors="""pt""")
__snake_case: Tuple = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
])
__snake_case: Dict = image_transforms(UpperCamelCase__).unsqueeze(0)
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1e-4)
__snake_case: Any = model(**UpperCamelCase__)
__snake_case: Dict = outputs.logits.argmax(-1).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx])
print("""First values of logits:""" , outputs.logits[0, :3])
if model_name == "focalnet-tiny":
__snake_case: str = torch.tensor([0.2_166, -0.4_368, 0.2_191])
elif model_name == "focalnet-tiny-lrf":
__snake_case: int = torch.tensor([1.1_669, 0.0_125, -0.1_695])
elif model_name == "focalnet-small":
__snake_case: Optional[Any] = torch.tensor([0.4_917, -0.0_430, 0.1_341])
elif model_name == "focalnet-small-lrf":
__snake_case: Optional[Any] = torch.tensor([-0.2_588, -0.5_342, -0.2_331])
elif model_name == "focalnet-base":
__snake_case: Dict = torch.tensor([-0.1_655, -0.4_090, -0.1_730])
elif model_name == "focalnet-base-lrf":
__snake_case: int = torch.tensor([0.5_306, -0.0_483, -0.3_928])
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4)
print("""Looks ok!""")
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(UpperCamelCase__)
processor.save_pretrained(UpperCamelCase__)
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''')
model.push_to_hub(F'''{model_name}''')
processor.push_to_hub(F'''{model_name}''')
if __name__ == "__main__":
__UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__UpperCAmelCase : Dict = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
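# Generic sketch of the key-renaming pattern used in the FocalNet conversion
# above: pop every key out of the checkpoint and reinsert its tensor under
# the converted name (the mapping below is a single illustrative rule, not
# the full rename table).
def rename_state_dict(state_dict, rename_fn):
    for key in list(state_dict.keys()):
        state_dict[rename_fn(key)] = state_dict.pop(key)
    return state_dict

renamed = rename_state_dict(
    {"patch_embed.proj.weight": 0},
    lambda k: k.replace("patch_embed.proj", "embeddings.patch_embeddings.projection"),
)
assert "embeddings.patch_embeddings.projection.weight" in renamed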
| 111 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : int ) -> List[str]:
_a : Any = """laion/clap-htsat-unfused"""
_a : Union[str, Any] = tempfile.mkdtemp()
def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict:
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Optional[int]:
_a : List[str] = self.get_tokenizer()
_a : Any = self.get_feature_extractor()
_a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
_a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> Optional[int]:
_a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> Optional[Any]:
_a : Optional[int] = self.get_feature_extractor()
_a : Tuple = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = floats_list((3, 1000) )
_a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self : Tuple ) -> Optional[int]:
_a : List[str] = self.get_feature_extractor()
_a : Any = self.get_tokenizer()
_a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Optional[int] = """This is a test string"""
_a : Tuple = processor(text=UpperCAmelCase__ )
_a : int = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : List[Any] ) -> Any:
_a : str = self.get_feature_extractor()
_a : List[str] = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : Dict = processor.batch_decode(UpperCAmelCase__ )
_a : Any = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
_a : str = self.get_feature_extractor()
_a : Optional[Any] = self.get_tokenizer()
_a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 294 | 0 |
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__snake_case = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase__ )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
from transformers.testing_utils import pytest_terminal_summary_main
UpperCamelCase :List[str] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase__ , id=UpperCamelCase__ )
| 259 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
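# Generic version of the deprecation-shim pattern above (class names here are
# made up for illustration): the old class subclasses the new one and only
# adds a warning on construction, so all behavior stays identical.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)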
| 294 | 0 |
def lowerCAmelCase_ ( __A = 1_000_000 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = set(range(3, UpperCamelCase__, 2 ) )
primes.add(2 )
for p in range(3, UpperCamelCase__, 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p, UpperCamelCase__, UpperCamelCase__ ) ) )
UpperCAmelCase__ = [float(UpperCamelCase__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(UpperCamelCase__, limit + 1, UpperCamelCase__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
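# Brute-force cross-check for small limits (illustrative): phi(n) counts the
# k in [1, n] coprime to n; for n = 2..8 the sum is 21, the example value
# quoted in Project Euler problem 72.
from math import gcd

def phi_bruteforce(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)

assert sum(phi_bruteforce(n) for n in range(2, 9)) == 21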
| 65 |
"""simple docstring"""
import unittest
import numpy as np
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ):
'''simple docstring'''
_a : List[Any] = np.shape(UpperCamelCase__ )
_a : Any = np.shape(UpperCamelCase__ )
_a : Union[str, Any] = np.shape(UpperCamelCase__ )
if shape_a[0] != shape_b[0]:
_a : int = (
"""Expected the same number of rows for A and B. """
F"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(UpperCamelCase__ )
if shape_b[1] != shape_c[1]:
_a : Tuple = (
"""Expected the same number of columns for B and C. """
F"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(UpperCamelCase__ )
_a : int = pseudo_inv
if a_inv is None:
try:
_a : Optional[int] = np.linalg.inv(UpperCamelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
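# The identity the first test below exercises, for a block matrix
#     M = [[A, B], [B^T, C]]  with A invertible:
#     det(M) = det(A) * det(S),  where  S = C - B^T A^{-1} B
# is the Schur complement of A in M, i.e. exactly what this function returns.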
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : int ) -> None:
_a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] )
_a : Optional[int] = np.array([[2, 1], [6, 3]] )
_a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_a : Union[str, Any] = np.block([[a, b], [b.T, c]] )
_a : int = np.linalg.det(UpperCAmelCase__ )
_a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ )
_a : List[Any] = np.linalg.det(UpperCAmelCase__ )
self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s )
def _lowercase ( self : int ) -> None:
_a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_a : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> None:
_a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
_a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 294 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict=3 ,SCREAMING_SNAKE_CASE__ : int=32 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 ,SCREAMING_SNAKE_CASE__ : Any=10 ,SCREAMING_SNAKE_CASE__ : Tuple=[10, 20, 30, 40] ,SCREAMING_SNAKE_CASE__ : List[str]=[1, 1, 2, 1] ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[Any]="relu" ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,):
SCREAMING_SNAKE_CASE:Optional[Any] = parent
SCREAMING_SNAKE_CASE:List[str] = batch_size
SCREAMING_SNAKE_CASE:Optional[Any] = image_size
SCREAMING_SNAKE_CASE:Dict = num_channels
SCREAMING_SNAKE_CASE:List[str] = embeddings_size
SCREAMING_SNAKE_CASE:str = hidden_sizes
SCREAMING_SNAKE_CASE:Union[str, Any] = depths
SCREAMING_SNAKE_CASE:Optional[Any] = is_training
SCREAMING_SNAKE_CASE:Optional[Any] = use_labels
SCREAMING_SNAKE_CASE:str = hidden_act
SCREAMING_SNAKE_CASE:str = num_labels
SCREAMING_SNAKE_CASE:List[Any] = scope
SCREAMING_SNAKE_CASE:str = len(UpperCAmelCase__ )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE:str = self.get_config()
return config, pixel_values
def __UpperCamelCase ( self : Tuple ):
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[str] ):
SCREAMING_SNAKE_CASE:Any = FlaxRegNetModel(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:str = model(UpperCAmelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __UpperCamelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE:Union[str, Any] = FlaxRegNetForImageClassification(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[int] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE:Tuple = config_and_inputs
SCREAMING_SNAKE_CASE:Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class _snake_case ( snake_case_ , unittest.TestCase ):
_A : str = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_A : Dict = False
_A : Optional[Any] = False
_A : int = False
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:str = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE:Optional[int] = ConfigTester(self ,config_class=UpperCAmelCase__ ,has_text_modality=UpperCAmelCase__ )
def __UpperCamelCase ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Optional[int] ):
return
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __UpperCamelCase ( self : Optional[Any] ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __UpperCamelCase ( self : Optional[Any] ):
pass
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE:Tuple = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE:Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE:Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,UpperCAmelCase__ )
def __UpperCamelCase ( self : Tuple ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
SCREAMING_SNAKE_CASE:List[str] = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = model(**self._prepare_for_class(UpperCAmelCase__ ,UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE:Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE:List[Any] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) ,expected_num_stages + 1 )
SCREAMING_SNAKE_CASE:Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE:Dict = True
check_hidden_states_output(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE:Union[str, Any] = True
check_hidden_states_output(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
def __UpperCamelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE:str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE:Optional[Any] = self._prepare_for_class(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[int] = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return model(pixel_values=UpperCAmelCase__ ,**UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
SCREAMING_SNAKE_CASE:str = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE:Any = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) ,len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ ,UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape ,output.shape )
def A_ ( ):
SCREAMING_SNAKE_CASE:Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class _snake_case ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Any ):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
SCREAMING_SNAKE_CASE:Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE:Dict = prepare_img()
SCREAMING_SNAKE_CASE:List[Any] = image_processor(images=UpperCAmelCase__ ,return_tensors="np" )
SCREAMING_SNAKE_CASE:Optional[int] = model(**UpperCAmelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE:Optional[int] = (1, 1_000)
self.assertEqual(outputs.logits.shape ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[Any] = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,UpperCAmelCase__ ,atol=1e-4 ) )
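# Minimal sketch of the jit-vs-eager equivalence pattern used in the test
# above (illustrative toy function, not RegNet): the jitted and non-jitted
# outputs of the same function should match.
import jax
import jax.numpy as jnp

@jax.jit
def forward(x):
    return jnp.tanh(x) * 2.0

jitted = forward(jnp.ones((2, 3)))
with jax.disable_jit():
    eager = forward(jnp.ones((2, 3)))
assert jnp.allclose(jitted, eager)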
| 139 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_a : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Any = dct.pop(UpperCamelCase__ )
_a : Dict = val
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = []
for k in state_dict.keys():
_a : Any = k
if ".pwconv" in k:
_a : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
_a : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
_a : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
_a : Tuple = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
_a : int = k_new.split(""".""" )
if ls[2].isdigit():
_a : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
_a : Tuple = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Tuple = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_a : Optional[int] = 1_0_0_0
_a : Optional[Any] = """huggingface/label-files"""
_a : Optional[Any] = """imagenet-1k-id2label.json"""
_a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Dict = idalabel
_a : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_a : Any = [3, 3, 6, 4]
_a : int = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
_a : Any = [3, 3, 9, 6]
_a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
_a : List[Any] = [4, 3, 1_0, 5]
_a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
_a : List[Any] = [4, 4, 1_2, 6]
_a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
_a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ )
else:
_a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" )
_a : int = checkpoint
_a : Optional[Any] = create_rename_keys(UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
_a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval()
hf_model.load_state_dict(UpperCamelCase__ )
# prepare test inputs
_a : Any = prepare_img()
_a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
_a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" )
# compare outputs from both models
_a : Dict = get_expected_output(UpperCamelCase__ )
_a : int = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_snake_case = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
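# Hedged walk-through of the "network.*" rename rule above: when the third
# dot-separated field is a digit, the key addresses a block inside a stage.
k_new = "network.2.3.mlp.fc1.weight"
ls = k_new.split(".")
if ls[2].isdigit():
    k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
assert k_new == "swiftformer.encoder.network.2.blocks.3.mlp.fc1.weight"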
| 294 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
a_ = logging.get_logger(__name__)
class lowercase__ :
a_ =42
a_ =None
@staticmethod
def UpperCAmelCase ( )-> Dict:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self , __UpperCAmelCase )-> Dict:
'''simple docstring'''
raise NotImplementedError
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def UpperCAmelCase ( cls )-> Dict:
'''simple docstring'''
return F"`pip install {cls.pip_package or cls.name}`"
class lowercase__ ( snake_case_ ):
a_ ='''optuna'''
@staticmethod
def UpperCAmelCase ( )-> List[Any]:
'''simple docstring'''
return is_optuna_available()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )-> int:
'''simple docstring'''
return run_hp_search_optuna(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return default_hp_space_optuna(UpperCAmelCase__ )
class lowercase__ ( snake_case_ ):
a_ ='''ray'''
a_ ='''\'ray[tune]\''''
@staticmethod
def UpperCAmelCase ( )-> List[Any]:
'''simple docstring'''
return is_ray_available()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
return run_hp_search_ray(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
return default_hp_space_ray(UpperCAmelCase__ )
class lowercase__ ( snake_case_ ):
a_ ='''sigopt'''
@staticmethod
def UpperCAmelCase ( )-> str:
'''simple docstring'''
return is_sigopt_available()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
return run_hp_search_sigopt(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return default_hp_space_sigopt(UpperCAmelCase__ )
class lowercase__ ( snake_case_ ):
a_ ='''wandb'''
@staticmethod
def UpperCAmelCase ( )-> Optional[Any]:
'''simple docstring'''
return is_wandb_available()
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return run_hp_search_wandb(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
return default_hp_space_wandb(UpperCAmelCase__ )
a_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def _a ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(UpperCamelCase__ ) > 0:
lowerCAmelCase__ = available_backends[0].name
if len(UpperCamelCase__ ) > 1:
logger.info(
F"{len(UpperCamelCase__ )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
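# Hedged sketch of what the Optuna backend boils down to (illustrative; in
# practice `Trainer.hyperparameter_search` builds the objective for you and
# `run_hp_search_optuna` drives the study). The metric here is a stand-in.
import optuna

def objective(trial):
    lr = trial.suggest_float("learning_rate", 1e-6, 1e-3, log=True)
    # ... train/evaluate with `lr` here; we return a stand-in metric ...
    return (lr - 1e-4) ** 2

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=10)
print(study.best_params)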
| 340 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['PerceiverFeatureExtractor']
_snake_case = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
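# Minimal sketch of the lazy-import idea behind `_LazyModule` (illustrative,
# not transformers' actual implementation): attribute access triggers the
# real submodule import and caches the result on the module object.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # import_structure maps submodule name -> list of exported names
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value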
| 294 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase__ ( PretrainedConfig ):
    model_type = '''sew-d'''
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1E-7 , feature_layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
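if __name__ == "__main__":
    # Hedged usage sketch (assumes `transformers` is installed): with the default
    # conv_stride above, the feature extractor downsamples raw audio by
    # prod(conv_stride) = 5 * 2**6 = 320x.
    print(lowerCamelCase__().inputs_to_logits_ratio)  # -> 320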
| 148 |
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner :
    def __init__( self : Optional[int] , k : float , window_size : int ) -> Dict:
        if k in (0.0_4, 0.0_6):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
def __str__( self : Dict ) -> str:
return str(self.k )
    def detect( self : int , img_path : str ) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list : list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
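    # Hedged numeric sketch of the response used in detect(): a window with strong
    # gradients in both directions scores well above the 0.5 corner threshold.
    wxx, wyy, wxy = 2.0, 2.0, 0.1
    print((wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2)  # -> 3.35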
| 294 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader( path ):
    """simple docstring"""
    with open(path, '''rb''' ) as f:
        im = Image.open(f )
        return im.convert('''RGB''' )
@dataclass
class DataTrainingArguments :
    '''simple docstring'''
    dataset_name : Optional[str] = field(
        default=None , metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        } , )
    dataset_config_name : Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_dir : Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir : Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split : Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples : Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ) -> List[str]:
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class ModelArguments :
    '''simple docstring'''
    model_name_or_path : str = field(
        default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    model_type : Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name : str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token : bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    ignore_mismatched_sizes : bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn( examples ):
    """simple docstring"""
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    labels = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}
def main( ):
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''', model_args, data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["""train"""] = os.path.join(data_args.train_dir, '''**''' )
        if data_args.validation_dir is not None:
            data_files["""validation"""] = os.path.join(data_args.validation_dir, '''**''' )
        dataset = load_dataset(
            '''imagefolder''', data_files=data_files, cache_dir=model_args.cache_dir, task='''image-classification''', )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
        split = dataset["""train"""].train_test_split(data_args.train_val_split )
        dataset["""train"""] = split["""train"""]
        dataset["""validation"""] = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["""train"""].features["""labels"""].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), label2id=label2id, id2label=id2label, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_a = image_processor.size["""shortest_edge"""]
else:
_a = (image_processor.size["""height"""], image_processor.size["""width"""])
_a = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
_a = Compose(
[
RandomResizedCrop(UpperCamelCase__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_a = Compose(
[
Resize(UpperCamelCase__ ),
CenterCrop(UpperCamelCase__ ),
ToTensor(),
normalize,
] )
def train_transforms(_lowerCAmelCase : Tuple ):
_a = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(_lowerCAmelCase : int ):
_a = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            dataset["""train"""] = (
                dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            dataset["""validation"""] = (
                dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''', metrics )
        trainer.save_metrics('''eval''', metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """image-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""image-classification""", """vision"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main() | 320 |
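# Hedged CLI sketch (model and dataset names are placeholders): the script above
# is typically launched as
#
#   python run_image_classification.py \
#     --model_name_or_path google/vit-base-patch16-224-in21k \
#     --dataset_name beans \
#     --output_dir ./vit-finetuned \
#     --do_train --do_eval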
"""simple docstring"""
def lowerCAmelCase__ ( equation1 , equation2 ):
    '''simple docstring'''
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError("""Please enter a valid equation.""" )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""" )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""" )
        else:
            raise ValueError("""No solution. (Inconsistent system)""" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: both numerators vanish, so x = y = 0 (consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
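if __name__ == "__main__":
    # Hedged usage sketch: solve 2x + 3y = 5 and 5x - 4y = 1 with the helper above.
    print(lowerCAmelCase__((2, 3, 5), (5, -4, 1)))  # -> (1.0, 1.0)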
| 294 | 0 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]:
    lowercase_ = re.sub("<n>" , "" , lowercase_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(lowercase_ ) )
| 247 |
"""simple docstring"""
_snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}]
_snake_case = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
SCREAMING_SNAKE_CASE :Tuple = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results( test_results )->Tuple:
    '''simple docstring'''
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure( failures_short_lines )->Union[str, Any]:
    '''simple docstring'''
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n" ):
        if re.search(r"_ \[doctest\]" , line ):
            in_error = True
            file = line.split(" " )[2]
        elif in_error and not line.split(" " )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message :
    """simple docstring"""
    def __init__( self : Optional[Any] , title : str , doc_test_results : Dict ) -> str:
        """simple docstring"""
        self.title = title
        self._time_spent = doc_test_results["""time_spent"""].split("," )[0]
        self.n_success = doc_test_results["""success"""]
        self.n_failures = doc_test_results["""failures"""]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
    @property
    def time( self : Dict ) -> str:
        """simple docstring"""
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds
        hours , minutes , seconds = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0
        return F'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
@property
    def header( self : Dict ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self : Tuple ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def failures( self : Optional[int] ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def category_failures( self : Tuple ) -> Dict:
        """simple docstring"""
        line_length = 4_0
        category_failures = {k: v["""failed"""] for k, v in self.doc_test_results.items() if isinstance(v , dict )}
        report = """"""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload( self : List[Any] ) -> str:
        """simple docstring"""
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
def lowerCAmelCase__ ( ) -> List[str]:
"""simple docstring"""
snake_case_ = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(UpperCAmelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=UpperCAmelCase__ , )
    def post( self : Union[str, Any] ) -> int:
        """simple docstring"""
        print("Sending the following payload" )
        print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
    def get_reply_blocks( self : Union[str, Any] , job_name : Union[str, Any] , job_link : int , failures : Tuple , text : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        failures_text = """"""
        for key, value in failures.items():
            value = value[:2_0_0] + """ [Truncated]""" if len(value ) > 2_5_0 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            content["""accessory"""] = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self : List[Any] ) -> Tuple:
        """simple docstring"""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made." )
        job_link = self.doc_test_results.pop("job_link" )
        self.doc_test_results.pop("failures" )
        self.doc_test_results.pop("success" )
        self.doc_test_results.pop("time_spent" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["failures"] ):
                text = F'''*Num failures* :{len(job_result['failed'] )} \n'''
                failures = job_result["""failures"""]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print("Sending the following reply" )
                print(json.dumps({"blocks": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'''Results for {job}''' , blocks=blocks , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def get_job_links( )->Optional[int]:
    '''simple docstring'''
    run_id = os.environ["""GITHUB_RUN_ID"""]
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links." , e )
        return {}
def retrieve_artifact( artifact_path )->Optional[Any]:
    '''simple docstring'''
    _artifact = {}
    if os.path.exists(artifact_path ):
        files = os.listdir(artifact_path )
        for file in files:
            try:
                with open(os.path.join(artifact_path , file ) , encoding="utf-8" ) as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(artifact_path , file )}.''' ) from e
    return _artifact
def retrieve_available_artifacts( )->int:
    '''simple docstring'''
    class Artifact :
        """simple docstring"""
        def __init__( self : int , name : str ) -> int:
            """simple docstring"""
            self.name = name
            self.paths = []
        def __str__( self : List[str] ) -> Optional[Any]:
            """simple docstring"""
            return self.name
        def add_path( self : Optional[int] , path : str ) -> str:
            """simple docstring"""
            self.paths.append({"name": self.name, "path": path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Any = get_job_links()
SCREAMING_SNAKE_CASE :Optional[int] = retrieve_available_artifacts()
SCREAMING_SNAKE_CASE :Dict = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
SCREAMING_SNAKE_CASE :str = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
SCREAMING_SNAKE_CASE :Any = github_actions_job_links.get('''run_doctests''')
SCREAMING_SNAKE_CASE :List[str] = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
SCREAMING_SNAKE_CASE :str = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :str = handle_test_results(artifact['''stats'''])
SCREAMING_SNAKE_CASE :List[Any] = failed
SCREAMING_SNAKE_CASE :Tuple = success
SCREAMING_SNAKE_CASE :Optional[Any] = time_spent[1:-1] + ''', '''
SCREAMING_SNAKE_CASE :Union[str, Any] = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
SCREAMING_SNAKE_CASE :List[str] = line.replace('''FAILED ''', '''''')
SCREAMING_SNAKE_CASE :List[str] = line.split()[0].replace('''\n''', '''''')
if "::" in line:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Dict = line.split('''::''')
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
SCREAMING_SNAKE_CASE :Optional[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
SCREAMING_SNAKE_CASE :Any = all_failures[test] if test in all_failures else '''N/A'''
SCREAMING_SNAKE_CASE :Optional[Any] = failure
break
SCREAMING_SNAKE_CASE :Optional[int] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
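# Hedged sanity check for the summary parser above (values are made up):
#
#     >>> handle_test_results("4 failed, 102 passed in 0:09:30")
#     (4, 102, '0:09:30')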
| 159 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = '''mvp'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[str] , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ) -> List[Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                """The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 0 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid , row , column , n ) -> Optional[Any]:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location( grid ) -> Any:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku( grid ) -> Union[str, Any]:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid ) -> List[Any]:
    for row in grid:
        for cell in row:
            print(cell , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 347 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
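# Hedged data-format sketch: the file readers above expect one whitespace-separated
# "token ... label" line per token, with blank lines or -DOCSTART- markers between
# sentences, e.g. in train.txt:
#
#   EU B-ORG
#   rejects O
#   German B-MISC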
| 294 | 0 |
"""simple docstring"""
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + """/p022_names.txt""" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("""\"""" , """""" ).split(""",""" )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 136 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class UpperCamelCase :
    def __init__( self : List[Any] , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self : Tuple ) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self : int ) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self : List[str] ) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self : Optional[Any] ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self : Dict , **kwargs : Optional[Any] ) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        """Updated available resource stack for processes: """
                        + """ """.join([str(x ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
    def __pretty_data( self : Any ) -> Optional[int]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
        print(
            """Current Usage by Active Processes: """
            + """ """.join(str(x ) for x in self.__claim_vector ) )
        print(
            """Initial Available Resources: """
            + """ """.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
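    # Hedged usage sketch: run the Banker's safety check on the sample tables above.
    UpperCamelCase(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)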
| 294 | 0 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval( s ) -> int:
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution( n = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
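    # Hedged cross-check: a direct O(n*k) scan over every 13-digit window,
    # printed next to the sliding-window result above for comparison.
    brute = max(str_eval(N[i : i + 13]) for i in range(len(N) - 12))
    print(f'{brute = }')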
| 111 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_snake_case = TypeVar('_T')
class UpperCamelCase ( Generic[_T] ):
    def __init__( self : Optional[int] , iterable : Iterable[_T] | None = None ) -> None:
        self._stack1 : list[_T] = list(iterable or [] )
        self._stack2 : list[_T] = []
    def __len__( self : str ) -> int:
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__( self : List[str] ) -> str:
        return f"""Queue({tuple(self._stack1[::-1] + self._stack2 )})"""
    def put( self : Union[str, Any] , item : _T ) -> None:
        self._stack1.append(item )
    def get( self : Optional[Any] ) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError("""Queue is empty""" )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
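    # Hedged usage sketch for the two-stack queue above:
    q = UpperCamelCase([1, 2])
    q.put(3)
    assert q.get() == 1  # FIFO order despite the two LIFO stacks underneath
    assert len(q) == 2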
| 294 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _A ( args ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
    @staticmethod
    def register_subcommand( parser ) -> Optional[Any]:
        download_parser = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''' , type=str , default=None , help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
        download_parser.add_argument('''model''' , type=str , help='''Name of the model to download''' )
        download_parser.set_defaults(func=_A )
    def __init__( self , model , cache , force , trust_remote_code ) -> int:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ) -> List[str]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
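# Hedged CLI sketch (model name is a placeholder): once registered on the
# `transformers-cli` parser, the command above is invoked as
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force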
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_snake_case = False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> List[Any]:
_a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Optional[Any] = torch.manual_seed(0 )
_a : Union[str, Any] = pipe.dual_guided(
prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
_a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[Any] = generator.manual_seed(0 )
_a : str = pipe.dual_guided(
prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = """cyberpunk 2077"""
_a : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Tuple = torch.manual_seed(0 )
_a : Any = pipe.dual_guided(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_a : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_a : int = """A painting of a squirrel eating a burger """
_a : Tuple = torch.manual_seed(0 )
_a : Union[str, Any] = pipe.text_to_image(
prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
_a : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 294 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt
def make_highpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt
def make_bandpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_allpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2] )
    return filt
def make_peak( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_lowshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_highshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
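if __name__ == "__main__":
    # Hedged usage sketch: push one sample through a 1 kHz low-pass biquad.
    # (Assumes the companion IIRFilter exposes a per-sample `process` method.)
    lowpass = make_lowpass(1_000, 48_000)
    print(lowpass.process(0.5))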
| 65 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'vocab.json'}
_snake_case = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_snake_case = {'mgp-str': 27}
class UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Optional[int] , vocab_file : List[str] , unk_token : List[Any]="[GO]" , bos_token : Tuple="[GO]" , eos_token : Optional[int]="[s]" , pad_token : int="[GO]" , **kwargs : Dict ) -> int:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ) -> int:
        return len(self.vocab )
    def get_vocab( self ) -> dict:
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text : str ) -> list:
        # character-level tokenization: every character becomes one token
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token : str ) -> int:
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index : int ) -> str:
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        return (vocab_file,)
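# Standalone sketch of the character-level round trip the class above performs
# (the three-entry vocab here is hypothetical, for illustration only):
if __name__ == "__main__":
    vocab = {"[GO]": 0, "a": 1, "b": 2}
    decoder = {v: k for k, v in vocab.items()}
    ids = [vocab.get(ch, vocab["[GO]"]) for ch in "abba"]
    assert [decoder[i] for i in ids] == ["a", "b", "b", "a"]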
| 294 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class _snake_case ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE:Optional[Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[int] = TFAutoModel.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = AutoModel.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : Optional[int] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE:Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[str] = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[str] = AutoModelForPreTraining.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE:Union[str, Any] = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = TFAutoModelForCausalLM.from_pretrained(
UpperCAmelCase__ ,output_loading_info=UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[str] = AutoModelForCausalLM.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = AutoModelForCausalLM.from_pretrained(
UpperCAmelCase__ ,output_loading_info=UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE:Tuple = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Tuple = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:str = AutoModelWithLMHead.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : int ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE:str = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[Any] = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = TFAutoModelForMaskedLM.from_pretrained(
UpperCAmelCase__ ,output_loading_info=UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[Any] = AutoModelForMaskedLM.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = AutoModelForMaskedLM.from_pretrained(
UpperCAmelCase__ ,output_loading_info=UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : Optional[int] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE:Any = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(
UpperCAmelCase__ ,output_loading_info=UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:str = AutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:int = AutoModelForSeqaSeqLM.from_pretrained(
UpperCAmelCase__ ,output_loading_info=UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : int ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE:str = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
SCREAMING_SNAKE_CASE:Optional[int] = AutoConfig.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__ ,from_pt=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[Any] = AutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase__ ,from_tf=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__ ,UpperCAmelCase__ )
    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 14_410 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
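# A condensed sketch of the cross-framework round trip these tests exercise,
# assuming both torch and TensorFlow are installed and the checkpoint is
# reachable (from_pt / from_tf force on-the-fly weight conversion):
#
#     from transformers import AutoModel, TFAutoModel
#
#     tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
#     pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)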
| 139 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int:
_a : Optional[int] = {"""num_train_timesteps""": 1000}
config.update(**UpperCAmelCase__ )
return config
def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
_a : Optional[int] = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : Union[str, Any] = 0.1 * sample
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : Tuple ) -> List[str]:
pass
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
_a : Optional[Any] = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : List[Any] = 0.1 * sample
_a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
_a : int = 10
_a : List[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_a : str = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def _lowercase ( self : int ) -> str:
_a : Dict = dict(self.forward_default_kwargs )
_a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**UpperCAmelCase__ )
_a : Tuple = self.dummy_sample
_a : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ):
_a : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_a : Optional[Any] = dummy_past_residuals[:]
_a : Optional[Any] = scheduler.timesteps[5]
_a : str = scheduler.timesteps[6]
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : List[str] ) -> List[str]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> List[str]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[Any]:
_a : str = self.full_loop()
_a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
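# A condensed sketch of the save/load round trip exercised above, using the
# public scheduler API (save_config / from_pretrained / set_timesteps):
#
#     import tempfile
#     from diffusers import IPNDMScheduler
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     with tempfile.TemporaryDirectory() as tmpdirname:
#         scheduler.save_config(tmpdirname)
#         reloaded = IPNDMScheduler.from_pretrained(tmpdirname)
#     reloaded.set_timesteps(10)
#     assert reloaded.timesteps.tolist() == scheduler.timesteps.tolist()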
| 294 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''PerceiverFeatureExtractor''']
a_ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
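# Usage note: thanks to the _LazyModule indirection above, importing a symbol
# only pulls in its heavy backend when the attribute is first accessed. A small
# sketch (the tiny config values are illustrative, not recommended settings):
#
#     from transformers import PerceiverConfig, PerceiverModel
#
#     config = PerceiverConfig(num_latents=16, d_latents=64)
#     model = PerceiverModel(config)  # torch-backed code loads at this access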
| 340 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class UpperCamelCase ( PreTrainedTokenizer ):
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text : str ) -> list:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token : str ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index : int ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1, 1]
        return ([0] * len(token_ids_a )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ) -> str:
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
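# A standalone sketch of the whitespace <-> placeholder round trip the class
# above relies on: jieba-segmented text stores spaces and newlines as
# U+2582/U+2583 so sentencepiece never sees raw whitespace, and _decode
# restores them afterwards.
if __name__ == "__main__":
    table = str.maketrans(" \n", "\u2582\u2583")
    encoded = "你好 世界\n".translate(table)
    decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
    assert decoded == "你好 世界\n"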
| 294 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = torch.device("cpu")
def UpperCamelCase__ ( ):
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
def UpperCamelCase__ ( lowercase__ : Tuple ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def UpperCamelCase__ ( lowercase__ : Tuple , lowercase__ : int , lowercase__ : List[Any] ):
snake_case : Any = dct.pop(UpperCamelCase__ )
snake_case : Dict = val
def UpperCamelCase__ ( lowercase__ : Union[str, Any] ):
snake_case : Tuple = []
for k in state_dict.keys():
snake_case : Any = k
if ".pwconv" in k:
snake_case : int = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
snake_case : List[str] = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
snake_case : Tuple = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
snake_case : int = k_new.split("." )
if ls[2].isdigit():
snake_case : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
snake_case : Tuple = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def UpperCamelCase__ ( lowercase__ : Optional[Any] , lowercase__ : int , lowercase__ : str ):
snake_case : Tuple = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
snake_case : Optional[int] = 1000
snake_case : Optional[Any] = """huggingface/label-files"""
snake_case : Optional[Any] = """imagenet-1k-id2label.json"""
snake_case : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="dataset" ) , "r" ) )
snake_case : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
snake_case : Dict = idalabel
snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
snake_case : Any = [3, 3, 6, 4]
snake_case : int = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
snake_case : Any = [3, 3, 9, 6]
snake_case : List[str] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
snake_case : List[Any] = [4, 3, 10, 5]
snake_case : Optional[int] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
snake_case : List[Any] = [4, 4, 12, 6]
snake_case : Optional[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
snake_case : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="cpu" , check_hash=UpperCamelCase__ )
else:
snake_case : Dict = torch.load(UpperCamelCase__ , map_location="cpu" )
snake_case : int = checkpoint
snake_case : Optional[Any] = create_rename_keys(UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
snake_case : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval()
hf_model.load_state_dict(UpperCamelCase__ )
# prepare test inputs
snake_case : Any = prepare_img()
snake_case : Union[str, Any] = ViTImageProcessor.from_pretrained("preprocessor_config" )
snake_case : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="pt" )
# compare outputs from both models
snake_case : Dict = get_expected_output(UpperCamelCase__ )
snake_case : int = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1E-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
__A = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
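# Example invocation of the conversion script above (the script file name and
# the checkpoint path are illustrative, not taken from the source):
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./checkpoints/swiftformer_xs.pth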
| 148 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest ( nn.Module ):
    def __init__( self ):
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook ( ModelHook ):
    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook ( ModelHook ):
    def post_forward( self , module , output ):
        return output + 1
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a : Any = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : int = torch.randn(2 , 3 )
_a : str = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_a : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Tuple = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Tuple ) -> List[str]:
_a : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : List[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : List[str] = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Any = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
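# A minimal standalone sketch of a custom hook, using accelerate's documented
# ModelHook interface (pre_forward runs before the wrapped forward). Run this
# file directly to try it; it is not part of the test suite above.
class ScaleInputHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # double the first positional input before the wrapped forward runs
        return (args[0] * 2,) + args[1:], kwargs


if __name__ == "__main__":
    demo_layer = nn.Linear(3 , 3 )
    add_hook_to_module(demo_layer , ScaleInputHook() )
    print(demo_layer(torch.randn(2 , 3 ) ) )
    remove_hook_from_module(demo_layer )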
| 294 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
    '''Output of the RobertaSeries model: the projected text embedding plus the usual encoder outputs.'''
    projection_state : Optional[torch.FloatTensor] = None
    last_hidden_state : torch.FloatTensor = None
    hidden_states : Optional[Tuple[torch.FloatTensor]] = None
    attentions : Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig ( XLMRobertaConfig ):
    '''simple docstring'''
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation ( RobertaPreTrainedModel ):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [R'''pooler''', R'''logit_scale''']
    _keys_to_ignore_on_load_missing = [R'''position_ids''', R'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , '''has_pre_transformation''' , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids = None , attention_mask = None , token_type_ids = None , position_ids = None , head_mask = None , inputs_embeds = None , encoder_hidden_states = None , encoder_attention_mask = None , output_attentions = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs["""hidden_states"""][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
 | 320 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    '''Pretty-print the distance of every vertex from the source vertex.'''
    print(F"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(F"""{i}\t\t{d}""" )
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    '''Returns True if one more round of relaxation would still shorten a path.'''
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''Computes single-source shortest paths; raises if a negative cycle exists.'''
    distance = [float("""inf""" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""" )
    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}

    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
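# Worked example (non-interactive), for quick reference:
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 1},
#         {"src": 2, "dst": 1, "weight": 2},
#     ]
#     bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0)
#     # -> [0.0, 3.0, 1.0]  (0 -> 2 -> 1 beats the direct 0 -> 1 edge)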
| 294 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 247 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = CycleDiffusionPipeline
_SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
_SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'''latents'''}
_SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
_SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
snake_case_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1_0_0_0 , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case_ = CLIPTextModel(UpperCAmelCase__ )
snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Dict=0 ) -> Optional[int]:
"""simple docstring"""
snake_case_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
snake_case_ = image / 2 + 0.5
if str(UpperCAmelCase__ ).startswith("mps" ):
snake_case_ = torch.manual_seed(UpperCAmelCase__ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
snake_case_ = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase__ )
snake_case_ = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase__ )
snake_case_ = pipe(**UpperCAmelCase__ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
snake_case_ = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCAmelCase__ , "half" ):
snake_case_ = module.half()
snake_case_ = CycleDiffusionPipeline(**UpperCAmelCase__ )
snake_case_ = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase__ )
snake_case_ = pipe(**UpperCAmelCase__ )
snake_case_ = output.images
snake_case_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
snake_case_ = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip("non-deterministic pipeline" )
def lowerCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
snake_case_ = init_image.resize((5_1_2, 5_1_2) )
snake_case_ = """CompVis/stable-diffusion-v1-4"""
snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder="scheduler" )
snake_case_ = CycleDiffusionPipeline.from_pretrained(
UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , torch_dtype=torch.float16 , revision="fp16" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ = """A black colored car"""
snake_case_ = """A blue colored car"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase__ , source_prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase__ , output_type="np" , )
snake_case_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def lowerCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
snake_case_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/cycle-diffusion/black_colored_car.png" )
snake_case_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
snake_case_ = init_image.resize((5_1_2, 5_1_2) )
snake_case_ = """CompVis/stable-diffusion-v1-4"""
snake_case_ = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder="scheduler" )
snake_case_ = CycleDiffusionPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
snake_case_ = """A black colored car"""
snake_case_ = """A blue colored car"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase__ , source_prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCAmelCase__ , output_type="np" , )
snake_case_ = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 159 |
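For context, the slow tests above drive the pipeline roughly like the following sketch (hedged: the checkpoint name and image URL are the ones the test uses, but running this assumes a GPU and network access; `prompt` describes the desired output and `source_prompt` the input image):

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
pipe = pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

image = pipe(
    prompt="A blue colored car",          # target description
    source_prompt="A black colored car",  # description of the input image
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
).images[0]
image.save("blue_car.png")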
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 294 | 0 |
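For reference, the expression the hubble_parameter function above implements is the Friedmann expansion rate

H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda},
\qquad \Omega_k = 1 - (\Omega_r + \Omega_m + \Omega_\Lambda)

In the demo, H_0 = 68.3, \Omega_m = 0.3, \Omega_\Lambda = 0.7 and \Omega_r = 10^{-4}, so at z = 0 the terms under the square root sum to exactly 1 and the script prints 68.3.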
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` on [a, b] by repeatedly halving the interval."""
    start = a
    end = b
    if function(a) == 0:  # one of the endpoints is already a root
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and both values have the same sign,
        # this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
| 347 |
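Since `bisection` only needs a sign change on [a, b], it works for any continuous function; a quick sanity check using the definitions above (the printed value is approximate, accurate to roughly the 1e-7 stopping tolerance):

# The positive root of x**2 - 2 is sqrt(2): f(0) = -2 and f(2) = 2 bracket it.
root = bisection(lambda x: x**2 - 2, 0, 2)
print(root)  # ~1.4142135..., within about 1e-7 of sqrt(2)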
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : List[Any] ) -> Dict:
_a : Optional[int] = tempfile.mkdtemp()
_a : Optional[Any] = SamImageProcessor()
_a : int = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : str ) -> int:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Tuple ) -> Dict:
_a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Dict ) -> Dict:
_a : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
_a : Optional[Any] = self.get_image_processor()
_a : int = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Union[str, Any] = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Tuple = [torch.ones((1, 3, 5, 5) )]
_a : Tuple = [[1764, 2646]]
_a : Optional[int] = [[683, 1024]]
_a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : int = processor.post_process_masks(
UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_a : Optional[Any] = [np.ones((1, 3, 5, 5) )]
_a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : List[str] = [[1, 0], [0, 1]]
with self.assertRaises(UpperCAmelCase__ ):
_a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) )
@require_vision
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Any ) -> List[str]:
_a : List[str] = tempfile.mkdtemp()
_a : Any = SamImageProcessor()
_a : Union[str, Any] = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Dict ) -> List[str]:
_a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
_a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> str:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : int = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def _lowercase ( self : Optional[Any] ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Any = [tf.ones((1, 3, 5, 5) )]
_a : Tuple = [[1764, 2646]]
_a : str = [[683, 1024]]
_a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : Union[str, Any] = processor.post_process_masks(
UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_a : List[Any] = [np.ones((1, 3, 5, 5) )]
_a : Optional[int] = processor.post_process_masks(
UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_a : Dict = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_a : List[Any] = processor.post_process_masks(
UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : str ) -> Optional[Any]:
_a : Optional[Any] = tempfile.mkdtemp()
_a : Dict = SamImageProcessor()
_a : List[str] = SamProcessor(UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def _lowercase ( self : Tuple ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : str ) -> int:
_a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def _lowercase ( self : int ) -> List[Any]:
_a : Optional[Any] = self.get_image_processor()
_a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
_a : str = [tf.convert_to_tensor(UpperCAmelCase__ )]
_a : Optional[int] = [torch.tensor(UpperCAmelCase__ )]
_a : Union[str, Any] = [[1764, 2646]]
_a : List[str] = [[683, 1024]]
_a : Optional[int] = processor.post_process_masks(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" )
_a : List[str] = processor.post_process_masks(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def _lowercase ( self : str ) -> Optional[Any]:
_a : List[Any] = self.get_image_processor()
_a : Any = SamProcessor(image_processor=UpperCAmelCase__ )
_a : Dict = self.prepare_image_inputs()
_a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
_a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
| 294 | 0 |
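The `post_process_masks` calls being tested upsample low-resolution mask logits back to the original image size. A hedged usage sketch (the checkpoint name and sizes below are illustrative, and downloading the processor requires network access):

import numpy as np
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")

# (batch, num_masks, H, W) low-res masks predicted for a 683x1024 model input
# that was resized down from an original 1764x2646 image.
low_res_masks = [np.ones((1, 3, 256, 256), dtype=np.float32)]
original_sizes = np.array([[1764, 2646]])
reshaped_input_sizes = np.array([[683, 1024]])

masks = processor.post_process_masks(low_res_masks, original_sizes, reshaped_input_sizes)
print(masks[0].shape)  # back to (1, 3, 1764, 2646)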
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 136 |
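The greedy loop in `pack_examples` is easier to see with a toy tokenizer where the budget is simply the number of whitespace-separated words; a minimal self-contained sketch of the same idea:

def pack_greedy(lines, max_tokens):
    """Merge consecutive lines while the merged line stays within the token budget."""
    packed, current = [], lines[0]
    for line in lines[1:]:
        candidate = current + " " + line
        if len(candidate.split()) > max_tokens:  # word count stands in for token count
            packed.append(current)  # can't fit: finalize the current example
            current = line
        else:
            current = candidate  # can fit: keep adding
    packed.append(current)
    return packed


print(pack_greedy(["a b", "c d e", "f", "g h i j"], max_tokens=4))
# ['a b', 'c d e f', 'g h i j']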
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embeddings
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # everything except the LM head lives under the `rwkv` prefix
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
'''simple docstring'''
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
_a : Tuple = 5_0_2_7_7
_a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
_a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ )
_a : int = len(UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
# 2. Build the config
_a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_a : Tuple = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_a : List[Any] = RwkvConfig(
vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(UpperCamelCase__ )
# 3. Download model file then convert state_dict
_a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ )
_a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" )
_a : List[str] = convert_state_dict(UpperCamelCase__ )
# 4. Split in shards and save
_a , _a : List[str] = shard_checkpoint(UpperCamelCase__ )
for shard_file, shard in shards.items():
torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
if index is not None:
_a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
# Save the index as well
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
_a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n"""
f.write(UpperCamelCase__ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"""Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model.""" )
_a : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
_a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ )
model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" )
tokenizer.push_to_hub(UpperCamelCase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 294 | 0 |
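To make the key-renaming rules concrete, here is what `convert_state_dict`'s substitutions do to a few representative RWKV checkpoint keys (self-contained; the sample keys are illustrative):

import re

for name in ["emb.weight", "blocks.3.att.time_mix_k", "blocks.7.ffn.key.weight", "head.weight"]:
    new = name
    if new.startswith("emb."):
        new = new.replace("emb.", "embeddings.")
    new = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", new)
    new = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", new)
    if new.endswith(".time_mix_k"):
        new = new.replace(".time_mix_k", ".time_mix_key")
    if new != "head.weight":
        new = "rwkv." + new
    print(f"{name} -> {new}")
# emb.weight -> rwkv.embeddings.weight
# blocks.3.att.time_mix_k -> rwkv.blocks.3.attention.time_mix_key
# blocks.7.ffn.key.weight -> rwkv.blocks.7.feed_forward.key.weight
# head.weight -> head.weight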
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __snake_case ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( snake_case_ , snake_case_ ):
'''simple docstring'''
@register_to_config
def __init__( self : Dict , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 3 , A : int = 32 , A : int = 256 , A : int = 32 , A : Optional[int] = None , A : float = 0.1_8215 , A : str = "group" , ):
super().__init__()
# pass init params to Encoder
__snake_case: Dict = Encoder(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , down_block_types=UpperCAmelCase__ , block_out_channels=UpperCAmelCase__ , layers_per_block=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , norm_num_groups=UpperCAmelCase__ , double_z=UpperCAmelCase__ , )
__snake_case: Optional[int] = vq_embed_dim if vq_embed_dim is not None else latent_channels
__snake_case: int = nn.Conv2d(UpperCAmelCase__ , UpperCAmelCase__ , 1 )
__snake_case: Optional[int] = VectorQuantizer(UpperCAmelCase__ , UpperCAmelCase__ , beta=0.25 , remap=UpperCAmelCase__ , sane_index_shape=UpperCAmelCase__ )
__snake_case: Union[str, Any] = nn.Conv2d(UpperCAmelCase__ , UpperCAmelCase__ , 1 )
# pass init params to Decoder
__snake_case: List[Any] = Decoder(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , up_block_types=UpperCAmelCase__ , block_out_channels=UpperCAmelCase__ , layers_per_block=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , norm_num_groups=UpperCAmelCase__ , norm_type=UpperCAmelCase__ , )
@apply_forward_hook
def UpperCAmelCase__ ( self : Any , A : torch.FloatTensor , A : bool = True ):
__snake_case: str = self.encoder(UpperCAmelCase__ )
__snake_case: Union[str, Any] = self.quant_conv(UpperCAmelCase__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCAmelCase__ )
@apply_forward_hook
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = False , A : bool = True ):
# also go through quantization layer
if not force_not_quantize:
__snake_case: Any = self.quantize(UpperCAmelCase__ )
else:
__snake_case: int = h
__snake_case: Any = self.post_quant_conv(UpperCAmelCase__ )
__snake_case: List[Any] = self.decoder(UpperCAmelCase__ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : str , A : torch.FloatTensor , A : bool = True ):
__snake_case: Tuple = sample
__snake_case: Union[str, Any] = self.encode(UpperCAmelCase__ ).latents
__snake_case: Any = self.decode(UpperCAmelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase__ )
| 111 |
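The `forward` above is encode, then quantize, then decode; with the released `diffusers.VQModel` (assumed available; its default config is already small) a smoke-test round trip looks like:

import torch
from diffusers import VQModel

model = VQModel()  # tiny default config: 3-channel 32x32 samples
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    reconstruction = model(sample).sample  # encode -> quantize -> decode
print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])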
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : int ) -> List[str]:
_a : Any = """laion/clap-htsat-unfused"""
_a : Union[str, Any] = tempfile.mkdtemp()
def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict:
return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : List[str] ) -> Optional[int]:
_a : List[str] = self.get_tokenizer()
_a : Any = self.get_feature_extractor()
_a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
_a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> Optional[int]:
_a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
_a : Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> Optional[Any]:
_a : Optional[int] = self.get_feature_extractor()
_a : Tuple = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = floats_list((3, 1000) )
_a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" )
_a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self : Tuple ) -> Optional[int]:
_a : List[str] = self.get_feature_extractor()
_a : Any = self.get_tokenizer()
_a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Optional[int] = """This is a test string"""
_a : Tuple = processor(text=UpperCAmelCase__ )
_a : int = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : List[Any] ) -> Any:
_a : str = self.get_feature_extractor()
_a : List[str] = self.get_tokenizer()
_a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : Dict = processor.batch_decode(UpperCAmelCase__ )
_a : Any = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Dict ) -> List[str]:
_a : str = self.get_feature_extractor()
_a : Optional[Any] = self.get_tokenizer()
_a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 294 | 0 |
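In normal use the processor above fuses both modalities in one call, returning the tokenizer fields for the text and the extracted features for the audio. A hedged sketch (checkpoint from the test; downloading it needs network access, and the random waveform is only a stand-in for real audio):

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

audio = np.random.randn(48_000).astype(np.float32)  # fake ~1s waveform
inputs = processor(text=["a dog barking"], audios=audio, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # input_ids/attention_mask plus the audio features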
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( snake_case_ ):
"""simple docstring"""
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 259 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 294 | 0 |
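Both samples above use the same backward-compatibility recipe: keep the old class as a subclass that warns and then delegates. A generic, self-contained illustration of the pattern (the class names here are invented for the demo):

import warnings


class ImageProcessor:  # stand-in for the maintained class
    def __init__(self, size=224):
        self.size = size


class FeatureExtractor(ImageProcessor):
    """Deprecated alias: warns on instantiation, then behaves like ImageProcessor."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "FeatureExtractor is deprecated; use ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


legacy = FeatureExtractor(size=256)  # emits a FutureWarning
print(legacy.size)  # 256, identical behavior otherwise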
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'spiece.model'}
UpperCamelCase__ = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class A ( snake_case_ ):
def __init__(self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=False , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Dict="<s>" , __UpperCAmelCase : Any="</s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : int="<sep>" , __UpperCAmelCase : Tuple="<pad>" , __UpperCAmelCase : Any="<cls>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : int=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : List[str] , ) -> None:
"""simple docstring"""
UpperCAmelCase__ = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
UpperCAmelCase__ = 3
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = remove_space
UpperCAmelCase__ = keep_accents
UpperCAmelCase__ = vocab_file
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
UpperCAmelCase__ = jieba
UpperCAmelCase__ = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowercase_ (self : Optional[Any] ) -> Any:
"""simple docstring"""
return len(self.sp_model )
def lowercase_ (self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.__dict__.copy()
UpperCAmelCase__ = None
return state
def __setstate__(self : Any , __UpperCAmelCase : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ = {}
UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ (self : List[str] , __UpperCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
if self.remove_space:
UpperCAmelCase__ = """ """.join(inputs.strip().split() )
else:
UpperCAmelCase__ = inputs
UpperCAmelCase__ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
UpperCAmelCase__ = unicodedata.normalize("NFKD" , UpperCAmelCase__ )
UpperCAmelCase__ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
UpperCAmelCase__ = outputs.lower()
return outputs
def lowercase_ (self : List[str] , __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.preprocess_text(UpperCAmelCase__ )
UpperCAmelCase__ = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
UpperCAmelCase__ = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
UpperCAmelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase__ = cur_pieces[1:]
else:
UpperCAmelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase__ )
def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def lowercase_ (self : Any , __UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , " " ).strip()
return out_string
def lowercase_ (self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase_ (self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def lowercase_ (self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase_ (self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , "wb" ) as fi:
UpperCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def lowercase_ (self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
UpperCAmelCase__ = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
| 65 |
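The CPM-specific twist in this tokenizer is the jieba pre-segmentation plus the `str.maketrans(" \n", "\u2582\u2583")` round trip, which protects spaces and newlines from SentencePiece; `_decode` maps the placeholders back. That mapping alone can be demonstrated in pure Python:

translator = str.maketrans(" \n", "\u2582\u2583")

encoded = "你好 世界\n再见".translate(translator)
print(encoded)  # 你好▂世界▃再见
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
print(decoded == "你好 世界\n再见")  # True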
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """Return the Schur complement of block A in the matrix [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )
    return mat_c - mat_b.T @ a_inv @ mat_b
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : int ) -> None:
_a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] )
_a : Optional[int] = np.array([[2, 1], [6, 3]] )
_a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_a : Union[str, Any] = np.block([[a, b], [b.T, c]] )
_a : int = np.linalg.det(UpperCAmelCase__ )
_a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ )
_a : List[Any] = np.linalg.det(UpperCAmelCase__ )
self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s )
def _lowercase ( self : int ) -> None:
_a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_a : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) -> None:
_a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
_a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(UpperCAmelCase__ ):
schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 294 | 0 |
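The identity the first test above exercises: for the symmetric block matrix

M = \begin{pmatrix} A & B \\ B^\top & C \end{pmatrix}, \qquad
S = C - B^\top A^{-1} B, \qquad
\det M = \det A \cdot \det S

so comparing \det M against \det A \cdot \det S validates the returned Schur complement.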
'''simple docstring'''
import os
import sys
import unittest
A_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A_ = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:List[Any] = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCAmelCase__ ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
SCREAMING_SNAKE_CASE:Any = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCAmelCase__ ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
SCREAMING_SNAKE_CASE:int = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCAmelCase__ ,"torch_and_transformers_and_onnx" )
def __UpperCamelCase ( self : str ):
SCREAMING_SNAKE_CASE:str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCAmelCase__ )
self.assertIn("torch_and_transformers" ,UpperCAmelCase__ )
self.assertIn("flax_and_transformers" ,UpperCAmelCase__ )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:List[str] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCAmelCase__ ,"\nCONSTANT = None\n" )
SCREAMING_SNAKE_CASE:Tuple = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCAmelCase__ ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
SCREAMING_SNAKE_CASE:List[Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
SCREAMING_SNAKE_CASE:List[Any] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCAmelCase__ ,UpperCAmelCase__ )
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:List[Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
SCREAMING_SNAKE_CASE:Optional[int] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCAmelCase__ )
| 139 |
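The `find_backend` being tested scans an `if not is_xxx_available():` guard line and joins the backend names with `_and_`. A minimal sketch of the idea (not the actual `check_dummies` utility):

import re

_backend_re = re.compile(r"is\_([a-z_]*)_available\(\)")


def find_backend_sketch(line):
    if "_available" not in line:
        return None
    backends = _backend_re.findall(line)
    return "_and_".join(backends) if backends else None


print(find_backend_sketch(" if not is_torch_available():"))  # torch
print(find_backend_sketch(" if not (is_torch_available() and is_transformers_available()):"))
# torch_and_transformers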
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def prepare_img():
    # an image of two cats, commonly used to smoke-test vision models
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
'''simple docstring'''
_a : Tuple = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_a : Optional[int] = 1_0_0_0
_a : Optional[Any] = """huggingface/label-files"""
_a : Optional[Any] = """imagenet-1k-id2label.json"""
_a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Dict = idalabel
_a : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_a : Any = [3, 3, 6, 4]
_a : int = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
_a : Any = [3, 3, 9, 6]
_a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
_a : List[Any] = [4, 3, 1_0, 5]
_a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
_a : List[Any] = [4, 4, 1_2, 6]
_a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
_a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ )
else:
_a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" )
_a : int = checkpoint
_a : Optional[Any] = create_rename_keys(UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
_a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval()
hf_model.load_state_dict(UpperCamelCase__ )
# prepare test inputs
_a : Any = prepare_img()
_a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
_a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" )
# compare outputs from both models
_a : Dict = get_expected_output(UpperCamelCase__ )
_a : int = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 294 | 0 |
a_ = '''\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'''
a_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 340 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
_import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( snake_case_ ):
a__ : str = (DEISMultistepScheduler,)
a__ : Dict = (('''num_inference_steps''', 25),)
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : Optional[Any] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**UpperCAmelCase__ )
return config
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : List[Any] = dict(self.forward_default_kwargs )
snake_case : Union[str, Any] = kwargs.pop("num_inference_steps" , UpperCAmelCase__ )
snake_case : Union[str, Any] = self.dummy_sample
snake_case : List[str] = 0.1 * sample
snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
snake_case : Tuple = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
snake_case : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
snake_case : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : List[Any] = sample, sample
for t in range(UpperCAmelCase__ , time_step + scheduler.config.solver_order + 1 ):
snake_case : str = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
snake_case : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : str = dict(self.forward_default_kwargs )
snake_case : Optional[int] = kwargs.pop("num_inference_steps" , UpperCAmelCase__ )
snake_case : Optional[int] = self.dummy_sample
snake_case : Dict = 0.1 * sample
snake_case : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Any = self.get_scheduler_config()
snake_case : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
snake_case : Tuple = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
snake_case : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
snake_case : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if scheduler is None:
snake_case : Dict = self.scheduler_classes[0]
snake_case : Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase__ )
snake_case : Dict = scheduler_class(**UpperCAmelCase__ )
snake_case : Union[str, Any] = self.scheduler_classes[0]
snake_case : List[str] = self.get_scheduler_config(**UpperCAmelCase__ )
snake_case : int = scheduler_class(**UpperCAmelCase__ )
snake_case : str = 10
snake_case : Optional[Any] = self.dummy_model()
snake_case : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case : List[str] = model(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = dict(self.forward_default_kwargs )
snake_case : Tuple = kwargs.pop("num_inference_steps" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
snake_case : Optional[int] = self.get_scheduler_config()
snake_case : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
snake_case : int = self.dummy_sample
snake_case : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , "set_timesteps" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , "set_timesteps" ):
snake_case : int = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
snake_case : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
snake_case : Dict = scheduler.timesteps[5]
snake_case : str = scheduler.timesteps[6]
snake_case : Optional[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
snake_case : int = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
snake_case : Dict = self.full_loop(scheduler=UpperCAmelCase__ )
snake_case : Tuple = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
snake_case : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case : Dict = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
snake_case : List[Any] = self.full_loop(scheduler=UpperCAmelCase__ )
snake_case : Tuple = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=UpperCAmelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , algorithm_type="deis" , solver_order=UpperCAmelCase__ , solver_type=UpperCAmelCase__ , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCAmelCase__ , solver_type=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , algorithm_type=UpperCAmelCase__ , )
snake_case : int = self.full_loop(
solver_order=UpperCAmelCase__ , solver_type=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , algorithm_type=UpperCAmelCase__ , )
assert not torch.isnan(UpperCAmelCase__ ).any(), "Samples have nan numbers"
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=UpperCAmelCase__ )
self.check_over_configs(lower_order_final=UpperCAmelCase__ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=0 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.full_loop()
snake_case : Optional[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = self.full_loop(prediction_type="v_prediction" )
snake_case : Any = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 0.0_91 ) < 1E-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Tuple = self.scheduler_classes[0]
snake_case : Any = self.get_scheduler_config(thresholding=UpperCAmelCase__ , dynamic_thresholding_ratio=0 )
snake_case : Tuple = scheduler_class(**UpperCAmelCase__ )
snake_case : int = 10
snake_case : Dict = self.dummy_model()
snake_case : List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case : List[str] = model(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
assert sample.dtype == torch.floataa
| 148 |
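A minimal sketch of the scheduler interchange the test above exercises: each multistep solver can be rebuilt from another's config because they share the underlying noise schedule. The kwargs echo the test's own scheduler config.
from diffusers import DEISMultistepScheduler, DPMSolverMultistepScheduler

deis = DEISMultistepScheduler(num_train_timesteps=1_000, beta_schedule="linear", solver_order=2)
dpm = DPMSolverMultistepScheduler.from_config(deis.config)   # same schedule, different solver
deis_again = DEISMultistepScheduler.from_config(dpm.config)  # round-trips cleanly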
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict:
if k in (0.0_4, 0.0_6):
_a : List[str] = k
_a : List[Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Dict ) -> str:
return str(self.k )
def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]:
_a : Dict = cva.imread(UpperCAmelCase__ , 0 )
_a , _a : List[Any] = img.shape
_a : list[list[int]] = []
_a : List[Any] = img.copy()
_a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB )
_a , _a : Any = np.gradient(UpperCAmelCase__ )
_a : Tuple = dx**2
_a : Union[str, Any] = dy**2
_a : Union[str, Any] = dx * dy
        _a : int = 0.0_4  # corner-response constant is hard-coded to 0.04 here, ignoring the k validated in __init__
_a : List[str] = self.window_size // 2
for y in range(UpperCAmelCase__ , h - offset ):
for x in range(UpperCAmelCase__ , w - offset ):
_a : str = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_a : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_a : Tuple = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_a : Any = (wxx * wyy) - (wxy**2)
_a : Tuple = wxx + wyy
_a : Any = det - k * (trace**2)
                # threshold on the corner response r; 0.5 is arbitrary and can be tuned per image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
_snake_case = HarrisCorner(0.04, 3)
_snake_case , _snake_case = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 294 | 0 |
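The inner loop of the detector above reduces to the classic Harris response r = det(M) - k * trace(M)^2 per window. A standalone sketch for a single window; the gradient sums below are made-up values for illustration.
# wxx, wyy, wxy are window sums of Ix**2, Iy**2 and Ix*Iy (illustrative values)
wxx, wyy, wxy = 9.0, 4.0, 1.5
k = 0.04
det = wxx * wyy - wxy**2
trace = wxx + wyy
r = det - k * trace**2  # large positive r marks a corner
print(r)  # 26.99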
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__snake_case = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 320 |
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# Check if the input is valid
if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
_a , _a , _a : Any = equationa
_a , _a , _a : Tuple = equationa
# Calculate the determinants of the matrices
_a : int = aa * ba - aa * ba
_a : str = ca * ba - ca * ba
_a : str = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution x = y = 0 (consistent system with a unique solution)
return (0.0, 0.0)
else:
_a : Dict = determinant_x / determinant
_a : str = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 294 | 0 |
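A worked example of the Cramer's-rule solver above, using its [a, b, c] encoding of a*x + b*y = c; the arithmetic mirrors the three determinants it computes.
a1, b1, c1 = 1, 2, 5    # x + 2y = 5
a2, b2, c2 = 3, -1, 1   # 3x - y = 1
determinant = a1 * b2 - a2 * b1        # -7
x = (c1 * b2 - c2 * b1) / determinant  # 1.0
y = (a1 * c2 - a2 * c1) / determinant  # 2.0
print((x, y))  # (1.0, 2.0)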
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCAmelCase_ ( snake_case_ ):
lowercase__ = 42
class UpperCAmelCase_ ( snake_case_, snake_case_ ):
@register_to_config
def __init__( self : int , snake_case_ : int = 16 , snake_case_ : int = 88 , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , snake_case_ : int = 1 , snake_case_ : float = 0.0 , snake_case_ : int = 32 , snake_case_ : Optional[int] = None , snake_case_ : bool = False , snake_case_ : Optional[int] = None , snake_case_ : str = "geglu" , snake_case_ : bool = True , snake_case_ : bool = True , ) -> str:
'''simple docstring'''
super().__init__()
A__ = num_attention_heads
A__ = attention_head_dim
A__ = num_attention_heads * attention_head_dim
A__ = in_channels
A__ = torch.nn.GroupNorm(num_groups=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , eps=1e-6 , affine=UpperCAmelCase__ )
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
# 3. Define transformers blocks
A__ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dropout=UpperCAmelCase__ , cross_attention_dim=UpperCAmelCase__ , activation_fn=UpperCAmelCase__ , attention_bias=UpperCAmelCase__ , double_self_attention=UpperCAmelCase__ , norm_elementwise_affine=UpperCAmelCase__ , )
for d in range(UpperCAmelCase__ )
] )
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : int , snake_case_ : List[Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : int=None , snake_case_ : Union[str, Any]=1 , snake_case_ : int=None , snake_case_ : bool = True , ) -> Dict:
'''simple docstring'''
A__ = hidden_states.shape
A__ = batch_frames // num_frames
A__ = hidden_states
A__ = hidden_states[None, :].reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
A__ = self.norm(UpperCAmelCase__ )
A__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = self.proj_in(UpperCAmelCase__ )
# 2. Blocks
for block in self.transformer_blocks:
A__ = block(
UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , timestep=UpperCAmelCase__ , cross_attention_kwargs=UpperCAmelCase__ , class_labels=UpperCAmelCase__ , )
# 3. Output
A__ = self.proj_out(UpperCAmelCase__ )
A__ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
A__ = hidden_states.reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
A__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase__ )
| 247 |
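A minimal sketch of the frame reshaping in the forward pass above: temporal attention treats every spatial position as an independent sequence over frames, so (batch * frames, C, H, W) is folded into (batch * H * W, frames, C) before the transformer blocks run.
import torch

batch, frames, channels, height, width = 2, 8, 4, 16, 16
hidden_states = torch.randn(batch * frames, channels, height, width)

x = hidden_states.reshape(batch, frames, channels, height, width)
x = x.permute(0, 2, 1, 3, 4)  # (batch, C, frames, H, W), the layout the group norm sees
x = x.permute(0, 3, 4, 2, 1).reshape(batch * height * width, frames, channels)
print(x.shape)  # torch.Size([512, 8, 4]): one per-pixel sequence over the 8 frames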
"""simple docstring"""
_snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}]
_snake_case = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294 | 0 |
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] , lowerCAmelCase_ :str , lowerCAmelCase_ :List[str]=0 )->int:
'''simple docstring'''
if name is None:
snake_case_ = None
else:
snake_case_ = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
snake_case_ = fmt.format(UpperCamelCase__ )
# Print and recurse (if needed).
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if msg is not None:
print(UpperCamelCase__ )
for k in val.keys():
recursive_print(UpperCamelCase__ , val[k] , spaces + 2 )
elif isinstance(UpperCamelCase__ , torch.Tensor ):
print(UpperCamelCase__ , ":" , val.size() )
else:
print(UpperCamelCase__ , ":" , UpperCamelCase__ )
def _lowerCAmelCase ( lowerCAmelCase_ :Optional[Any] , lowerCAmelCase_ :Any , lowerCAmelCase_ :List[str] , lowerCAmelCase_ :str , lowerCAmelCase_ :Optional[int] )->List[Any]:
'''simple docstring'''
snake_case_ = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
snake_case_ = (num_heads, hidden_size, num_splits) + input_shape[1:]
snake_case_ = param.view(*UpperCamelCase__ )
snake_case_ = param.transpose(0 , 2 )
snake_case_ = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
snake_case_ = (num_heads, num_splits, hidden_size) + input_shape[1:]
snake_case_ = param.view(*UpperCamelCase__ )
snake_case_ = param.transpose(0 , 1 ).contiguous()
snake_case_ = param.view(*UpperCamelCase__ )
return param
def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :str , lowerCAmelCase_ :Any )->Union[str, Any]:
'''simple docstring'''
snake_case_ = {}
# old versions did not store training args
snake_case_ = input_state_dict.get("args" , UpperCamelCase__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
snake_case_ = ds_args.padded_vocab_size
snake_case_ = ds_args.max_position_embeddings
snake_case_ = ds_args.hidden_size
snake_case_ = ds_args.num_layers
snake_case_ = ds_args.num_attention_heads
snake_case_ = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
snake_case_ = config.n_head
# The hidden_size per head.
snake_case_ = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
snake_case_ = input_state_dict["""checkpoint_version"""]
else:
snake_case_ = 0.0
# The model.
snake_case_ = input_state_dict["""model"""]
# The language model.
snake_case_ = model["""language_model"""]
# The embeddings.
snake_case_ = lm["""embedding"""]
# The word embeddings.
snake_case_ = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
snake_case_ = word_embeddings[: config.vocab_size, :]
snake_case_ = word_embeddings
# The position embeddings.
snake_case_ = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
snake_case_ = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
snake_case_ = pos_embeddings
# The transformer.
snake_case_ = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
snake_case_ = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
snake_case_ = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
snake_case_ = layer_re.match(UpperCamelCase__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
snake_case_ = int(m.group(1 ) )
# The name of the operation.
snake_case_ = m.group(2 )
# Is it a weight or a bias?
snake_case_ = m.group(3 )
# The name of the layer.
snake_case_ = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
snake_case_ = """ln_1""" if op_name.startswith("input" ) else """ln_2"""
snake_case_ = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
snake_case_ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , UpperCamelCase__ , UpperCamelCase__ )
snake_case_ = causal_mask
# Insert a "dummy" tensor for masked_bias.
snake_case_ = torch.tensor(-1e4 , dtype=torch.floataa )
snake_case_ = masked_bias
snake_case_ = fix_query_key_value_ordering(UpperCamelCase__ , UpperCamelCase__ , 3 , UpperCamelCase__ , UpperCamelCase__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
snake_case_ = out_val.transpose(0 , 1 ).contiguous()
# Store.
snake_case_ = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
snake_case_ = fix_query_key_value_ordering(UpperCamelCase__ , UpperCamelCase__ , 3 , UpperCamelCase__ , UpperCamelCase__ )
# Store. No change of shape.
snake_case_ = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
snake_case_ = megatron_to_transformers[op_name]
snake_case_ = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
snake_case_ = megatron_to_transformers[op_name]
snake_case_ = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
snake_case_ = transformer["""final_layernorm.weight"""]
snake_case_ = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
snake_case_ = word_embeddings
# It should be done!
return output_state_dict
def _lowerCAmelCase ( )->Tuple:
'''simple docstring'''
snake_case_ = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=UpperCamelCase__ , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=UpperCamelCase__ , help="An optional config json file describing the pre-trained model." , )
snake_case_ = parser.parse_args()
# Extract the basename.
snake_case_ = os.path.dirname(args.path_to_checkpoint )
# Load the model.
    # the .zip wrapper is optional; support it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
snake_case_ = torch.load(UpperCamelCase__ , map_location="cpu" )
else:
snake_case_ = torch.load(args.path_to_checkpoint , map_location="cpu" )
snake_case_ = input_state_dict.get("args" , UpperCamelCase__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
snake_case_ = """gelu_fast"""
elif ds_args.openai_gelu:
snake_case_ = """gelu_new"""
else:
snake_case_ = """gelu"""
else:
# in the very early days this used to be "gelu_new"
snake_case_ = """gelu_new"""
# Spell out all parameters in case the defaults change.
snake_case_ = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=UpperCamelCase__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=UpperCamelCase__ , summary_activation=UpperCamelCase__ , summary_proj_to_labels=UpperCamelCase__ , summary_first_dropout=0.1 , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
snake_case_ = GPTaConfig.from_json_file(args.config_file )
snake_case_ = ["""GPT2LMHeadModel"""]
# Convert.
print("Converting" )
snake_case_ = convert_megatron_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(UpperCamelCase__ , UpperCamelCase__ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
snake_case_ = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
snake_case_ = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
snake_case_ = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
snake_case_ = """gpt2"""
snake_case_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
snake_case_ = type(UpperCamelCase__ ).__name__
snake_case_ = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(UpperCamelCase__ )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(UpperCamelCase__ )
# Store the state_dict to file.
snake_case_ = os.path.join(UpperCamelCase__ , "pytorch_model.bin" )
print(F'''Saving checkpoint to \"{output_checkpoint_file}\"''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 159 |
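A standalone sketch of the checkpoint_version >= 2.0 branch of the QKV reordering above: Megatron stores the fused matrix grouped as (num_heads, num_splits, hidden_size) on dim 0, and the fix regroups it to (num_splits, num_heads, hidden_size) before flattening back. The dimensions below are small made-up values.
import torch

num_heads, num_splits, hidden_size, d_model = 2, 3, 4, 24
param = torch.randn(num_heads * num_splits * hidden_size, d_model)

out = param.view(num_heads, num_splits, hidden_size, d_model)
out = out.transpose(0, 1).contiguous()  # regroup so the splits axis is outermost
out = out.view(num_heads * num_splits * hidden_size, d_model)
print(out.shape == param.shape)  # True: same shape, rows reordered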
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 0 |
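A minimal sketch instantiating the configuration above with its defaults; MvpConfig is the public name implied by model_type "mvp", and the printed values echo the defaults in the signature.
config = MvpConfig()
print(config.model_type, config.d_model)  # mvp 1024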
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A (nn.Module):
'''simple docstring'''
def __init__( self : Union[str, Any] ) ->int:
"""simple docstring"""
super().__init__()
snake_case_ = nn.Linear(3 , 4 )
snake_case_ = nn.BatchNormad(4 )
snake_case_ = nn.Linear(4 , 5 )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[str] ) ->int:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class __A (snake_case_):
'''simple docstring'''
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Any , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class __A (snake_case_):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
return output + 1
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = ModelForTest()
snake_case_ = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ModelForTest()
snake_case_ = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = ModelForTest()
snake_case_ = torch.randn(2 , 3 )
snake_case_ = test_model(x + 1 )
snake_case_ = test_model(x + 2 )
snake_case_ = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it rather than chaining
snake_case_ = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
snake_case_ = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
snake_case_ = ModelForTest()
snake_case_ = torch.randn(2 , 3 )
snake_case_ = test_model(UpperCAmelCase__ )
snake_case_ = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it rather than chaining
snake_case_ = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
snake_case_ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = ModelForTest()
snake_case_ = torch.randn(2 , 3 )
snake_case_ = test_model(UpperCAmelCase__ )
snake_case_ = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
snake_case_ = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
snake_case_ = True
snake_case_ = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCAmelCase ( self : Optional[Any] ) ->str:
"""simple docstring"""
snake_case_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
snake_case_ = torch.randn(2 , 3 ).to(0 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
snake_case_ = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
snake_case_ = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
snake_case_ = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
snake_case_ = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
snake_case_ = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
snake_case_ = torch.randn(2 , 3 )
snake_case_ = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 347 |
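A minimal sketch of the hook mechanics the tests above exercise, assuming accelerate's public ModelHook API (pre_forward receives the module plus the forward args and returns rewritten args and kwargs): the hook shifts the input before the wrapped module's forward runs.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOneHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs  # shift the first positional input

linear = nn.Linear(3, 3)
add_hook_to_module(linear, AddOneHook())
x = torch.randn(2, 3)
hooked = linear(x)                               # actually computes linear(x + 1)
plain = remove_hook_from_module(linear)(x + 1)   # hook removed, plain forward
assert torch.allclose(hooked, plain, atol=1e-5)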
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 294 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
'''simple docstring'''
if len(UpperCamelCase__ ) == 0:
raise ValueError("""find_max() arg is an empty sequence""" )
if (
left >= len(UpperCamelCase__ )
or left < -len(UpperCamelCase__ )
or right >= len(UpperCamelCase__ )
or right < -len(UpperCamelCase__ )
):
raise IndexError("""list index out of range""" )
if left == right:
return nums[left]
lowercase_ = (left + right) >> 1 # the middle
lowercase_ = find_max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # find max in range[left, mid]
lowercase_ = find_max(UpperCamelCase__ , mid + 1 , UpperCamelCase__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 136 |
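A quick usage check of the divide-and-conquer maximum above; the public name is assumed to be find_max, as its own error message suggests, and the bounds are inclusive indices.
nums = [3, 1, 4, 1, 5, 9, 2, 6]
print(find_max(nums, 0, len(nums) - 1))  # 9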
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None:
_a : List[str] = claim_vector
_a : List[Any] = allocated_resources_table
_a : Union[str, Any] = maximum_claim_table
def _lowercase ( self : Tuple ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowercase ( self : int ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowercase ( self : List[str] ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]:
return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()}
def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None:
_a : List[Any] = self.__need()
_a : Optional[int] = self.__allocated_resources_table
_a : str = self.__available_resources()
_a : Optional[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
_a : int = False
for each_need in need_list:
_a : Optional[int] = True
for index, need in enumerate(UpperCAmelCase__ ):
if need > available_resources[index]:
_a : List[Any] = False
break
if execution:
_a : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_a : Any = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(UpperCAmelCase__ )
# update available/freed resources stack
_a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _lowercase ( self : Any ) -> Optional[int]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 0 |
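The safety test at the heart of the class above, shown standalone with the module's own claim/allocation/maximum tables: a process may run only when its remaining need fits within the currently available resources.
import numpy as np

claim_vector = np.array([8, 5, 9, 7])
allocated = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
maximum = np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])

available = claim_vector - allocated.sum(axis=0)  # [1 2 2 2]
need = maximum - allocated
runnable = [i for i in range(len(need)) if np.all(need[i] <= available)]
print(runnable)  # [2]: only process 3 can execute right now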
import math
import sys
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
if number != int(UpperCamelCase__):
raise ValueError("""the value of input must be a natural number""")
if number < 0:
raise ValueError("""the value of input must not be a negative number""")
if number == 0:
return 1
__snake_case: str = [-1] * (number + 1)
__snake_case: List[Any] = 0
for i in range(1 , number + 1):
__snake_case: List[str] = sys.maxsize
__snake_case: str = int(math.sqrt(UpperCamelCase__))
for j in range(1 , root + 1):
__snake_case: Union[str, Any] = 1 + answers[i - (j**2)]
__snake_case: Optional[int] = min(UpperCamelCase__ , UpperCamelCase__)
__snake_case: List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 |
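A worked check of the DP above, which computes the minimum number of perfect squares summing to n; the entry-point name is mangled here, so a conventional name like num_squares is assumed.
# 12 = 4 + 4 + 4  -> 3 squares
# 13 = 4 + 9      -> 2 squares
# num_squares(12)  # 3
# num_squares(13)  # 2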
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_snake_case = TypeVar('_T')
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Direct references to the bound methods speed up the transfer loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
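    # Usage sketch for the two-stack queue above: items come out in FIFO order
    # even though each underlying container is LIFO.
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    print(queue.get())  # 1
    print(len(queue))  # 3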
| 294 | 0 |
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
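    # A non-interactive check of the output format (an illustrative assertion,
    # not part of the original script):
    assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]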
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_snake_case = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
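# For context, a minimal non-test sketch of driving the pipeline above. It
# assumes a CUDA device and Hub access, and is illustrative only:
#
#     pipe = VersatileDiffusionPipeline.from_pretrained(
#         "shi-labs/versatile-diffusion", torch_dtype=torch.float16
#     ).to("cuda")
#     result = pipe.text_to_image("A painting of a squirrel eating a burger")
#     result.images[0].save("squirrel.png")  # default output type is PIL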
| 294 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand() -> tuple[str, str, str]:
    """Pick two random hands from SORTED_HANDS and compute the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_hand_compare_with(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_hand_compare_with_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five-high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls should keep returning True and must not mutate the
    # card values after the first call.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: count how many hands Player 1 wins.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
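# Illustrative use of PokerHand outside pytest, mirroring the first
# TEST_COMPARE row above: a royal flush beats a six-high straight flush.
def demo_compare_hands() -> str:
    hand, other = PokerHand("2H 3H 4H 5H 6H"), PokerHand("KS AS TS QS JS")
    return hand.compare_with(other)  # "Loss" from the first hand's perspective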
| 65 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes into single characters.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
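# A hedged usage sketch of the character tokenizer above; "vocab.json" is a
# placeholder path to a character-to-id mapping, not a file shipped with this
# module:
#
#     tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#     tokens = tokenizer._tokenize("ticket")  # ['t', 'i', 'c', 'k', 'e', 't']
#     ids = [tokenizer._convert_token_to_id(t) for t in tokens]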
| 294 | 0 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 139 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM is a multistep method, so the loop runs twice over the timesteps.
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 294 | 0 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
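    # Worked example of the log trick above: res(2, 10) = 10*log10(2) ~ 3.010,
    # while res(3, 7) = 7*log10(3) ~ 3.338, so 3**7 (2187) beats 2**10 (1024).
    assert res(3, 7) > res(2, 10)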
| 340 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. Used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 294 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 148 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
@require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))

        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 294 | 0 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
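# Example use of glue_compute_metrics (requires scikit-learn; also emits the
# deprecation warning defined above):
#
#     import numpy as np
#     preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}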
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 320 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return shortest distances from src to every vertex, or raise on a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
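# A non-interactive example of bellman_ford on a four-vertex graph; the edge
# (1 -> 2, weight -3) makes the 0 -> 1 -> 2 route cheaper than the direct edge:
#
#     example_graph = [
#         {"src": 0, "dst": 1, "weight": 4},
#         {"src": 0, "dst": 2, "weight": 5},
#         {"src": 1, "dst": 2, "weight": -3},
#         {"src": 2, "dst": 3, "weight": 2},
#     ]
#     bellman_ford(example_graph, 4, 4, 0)  # -> [0.0, 4.0, 1.0, 3.0]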
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 294 | 0 |
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """
        k: empirically determined Harris constant, usually 0.04 or 0.06
        window_size: size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2

        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)

        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 247 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the n-digit positive integers that are also an n-th power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''')
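    # For instance, 16807 = 7**5 is a five-digit fifth power, so it is counted;
    # bases stop below 10 because 10**n always has n + 1 digits, never n.
    # solution(10, 22) evaluates to 49, the Project Euler 63 answer.
    assert len(str(7**5)) == 5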
| 159 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 294 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=99 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : List[str]=512 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any="None" , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=None , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = relative_attention
snake_case_ = position_biased_input
snake_case_ = pos_att_type
snake_case_ = scope
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = DebertaVaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )[0]
snake_case_ = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )[0]
snake_case_ = model(UpperCAmelCase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
snake_case_ = DebertaVaForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ) ->str:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = DebertaVaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCAmelCase__ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int ) ->Any:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = DebertaVaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = DebertaVaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = DebertaVaForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowercase: Optional[int] = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Tuple = True
__lowercase: Any = False
__lowercase: Tuple = False
__lowercase: int = False
__lowercase: str = False
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = DebertaVaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase__ )
def lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCAmelCase__ )
@slow
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DebertaVaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
pass
@slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 347 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # popped by the processor as well
        input_feat_extract.pop("reshaped_input_sizes")  # popped by the processor as well

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
@require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
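        # Note on the shapes (illustrative): post_process_masks upsamples the
        # low-resolution (1, 3, 5, 5) masks back to the original image size in
        # original_sizes, hence the (1, 3, 1764, 2646) expectation above.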
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # popped by the processor as well
        input_feat_extract.pop("reshaped_input_sizes")  # popped by the processor as well

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
@require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
| 294 | 0 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = {}
lowercase_ = []
lowercase_ = 1
lowercase_ = [1, 2]
lowercase_ = {"""a""": 1, """b""": 2}
lowercase_ = {"""a""": [1, 2], """b""": [3, 4]}
lowercase_ = {"""a""": {"""1""": 1}, """b""": 2}
lowercase_ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
lowercase_ = {}
lowercase_ = []
lowercase_ = 2
lowercase_ = [2, 3]
lowercase_ = {"""a""": 2, """b""": 3}
lowercase_ = {"""a""": [2, 3], """b""": [4, 5]}
lowercase_ = {"""a""": {"""1""": 2}, """b""": 3}
lowercase_ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__)
lowercase_ = 2
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
lowercase_ = {"""a""": np.eye(2), """b""": np.zeros(3), """c""": np.ones(2)}
lowercase_ = {"""a""": 2, """b""": 0, """c""": 2}
lowercase_ = {
"""a""": np.eye(2).astype(UpperCAmelCase__),
"""b""": np.zeros(3).astype(UpperCAmelCase__),
"""c""": np.ones(2).astype(UpperCAmelCase__),
}
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__)
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ , num_proc=UpperCAmelCase__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase__): # can't pickle a local lambda
map_nested(lambda lowerCAmelCase_: x + 1 , UpperCAmelCase__ , num_proc=UpperCAmelCase__)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = {"""a""": 1, """b""": 2}
lowercase_ = {"""a""": 3, """b""": 4}
lowercase_ = {"""a""": 5, """b""": 6}
lowercase_ = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))])
self.assertEqual(sorted(zip_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)) , UpperCAmelCase__)
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
@require_tf
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
lowercase_ = layers.Dense(2)
def gen_random_output():
lowercase_ = tf.random.uniform((1, 3))
return model(UpperCAmelCase__).numpy()
with temp_seed(4_2 , set_tensorflow=UpperCAmelCase__):
lowercase_ = gen_random_output()
with temp_seed(4_2 , set_tensorflow=UpperCAmelCase__):
lowercase_ = gen_random_output()
lowercase_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase__ , UpperCAmelCase__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@require_torch
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
import torch
def gen_random_output():
lowercase_ = torch.nn.Linear(3 , 2)
lowercase_ = torch.rand(1 , 3)
return model(UpperCAmelCase__).detach().numpy()
with temp_seed(4_2 , set_pytorch=UpperCAmelCase__):
lowercase_ = gen_random_output()
with temp_seed(4_2 , set_pytorch=UpperCAmelCase__):
lowercase_ = gen_random_output()
lowercase_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase__ , UpperCAmelCase__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3)
with temp_seed(4_2):
lowercase_ = gen_random_output()
with temp_seed(4_2):
lowercase_ = gen_random_output()
lowercase_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase__ , UpperCAmelCase__)
self.assertGreater(np.abs(outa - outa).sum() , 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
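# Behavior illustrated above (informal): iflatmap_unordered flattens each
# worker's iterable output into a single stream, so ten _split_text calls on
# "hello there" yield twenty words, in arbitrary order.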
| 136 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
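# Examples of the renaming above (illustrative):
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"
#   "head.weight"             -> "head.weight"  (left untouched)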
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
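# Example invocation (illustrative; the repo and file names are placeholders):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m-hf --size 169M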
| 294 | 0 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the sorted mode(s) of ``input_list`` (empty list for empty input)."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # the highest frequency in the input list
    # gather every distinct value whose count equals the maximum
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
if __name__ == "__main__":
import doctest
doctest.testmod()
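# Quick examples (illustrative):
#   mode([1, 2, 2, 3, 3]) -> [2, 3]   (tie: both modes returned, sorted)
#   mode([])              -> []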
| 111 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 294 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        # checkpoints fine-tuned at a new resolution carry pretrained window sizes
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
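# Example of the renaming above (illustrative):
#   "layers.0.blocks.1.attn.proj.weight"
#     -> "swinv2.encoder.layers.0.blocks.1.attention.output.dense.weight"
#   "head.weight" -> "classifier.weight"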
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into query / key / value tensors
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 259 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
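# The shim keeps old checkpoints loadable while emitting the FutureWarning
# above (illustrative usage):
#   extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")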
| 294 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # split the fused qkv projection into query / key / value tensors
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)  # dict-like model output; printing its keys is a smoke test

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 65 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B^T, C]] with respect to A."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
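# Worked example (illustrative, hypothetical matrices): with A = [[2, 0], [0, 2]],
# B = [[1], [0]] and C = [[1]], the complement is
# C - B^T A^{-1} B = [[1 - 0.5]] = [[0.5]].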
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # a deliberately has 2 rows while b has 3, so the row check must raise
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        # c has 3 columns while b has 2, so the column check must raise
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 294 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( snake_case_ ):
_A : int = (IPNDMScheduler,)
_A : int = (('''num_inference_steps''', 5_0),)
def __UpperCamelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Tuple ):
SCREAMING_SNAKE_CASE:Optional[int] = {"""num_train_timesteps""": 1_000}
config.update(**UpperCAmelCase__ )
return config
    def __UpperCamelCase ( self : Dict ,time_step : Any=0 ,**kwargs : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE:Dict = kwargs.pop("num_inference_steps" ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE:Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE:Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE:Any = dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE:str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE:Optional[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE:List[Any] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE:str = new_scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE:Optional[int] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE:Tuple = new_scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : Tuple ):
pass
    def __UpperCamelCase ( self : Optional[int] ,time_step : List[str]=0 ,**kwargs : Optional[Any] ):
SCREAMING_SNAKE_CASE:Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE:Optional[Any] = kwargs.pop("num_inference_steps" ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE:List[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE:Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE:Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE:Any = dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE:List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE:Optional[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE:List[str] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE:Tuple = new_scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE:int = new_scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self : str ,**SCREAMING_SNAKE_CASE__ : Any ):
SCREAMING_SNAKE_CASE:Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE:Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:int = 10
SCREAMING_SNAKE_CASE:List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE:str = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE:str = model(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:List[Any] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE:Union[str, Any] = model(UpperCAmelCase__ ,UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Any = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ).prev_sample
return sample
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:Dict = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE:int = kwargs.pop("num_inference_steps" ,UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE:Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE:Tuple = scheduler_class(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE:Tuple = self.dummy_sample
SCREAMING_SNAKE_CASE:Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ ,"set_timesteps" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ ,"set_timesteps" ):
SCREAMING_SNAKE_CASE:List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE:Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
SCREAMING_SNAKE_CASE:Optional[Any] = dummy_past_residuals[:]
SCREAMING_SNAKE_CASE:Optional[Any] = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE:str = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE:Optional[int] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE:Union[str, Any] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
SCREAMING_SNAKE_CASE:Tuple = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
SCREAMING_SNAKE_CASE:List[str] = scheduler.step(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __UpperCamelCase ( self : List[str] ):
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ,time_step=UpperCAmelCase__ )
def __UpperCamelCase ( self : List[str] ):
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase__ ,time_step=UpperCAmelCase__ )
def __UpperCamelCase ( self : int ):
SCREAMING_SNAKE_CASE:str = self.full_loop()
SCREAMING_SNAKE_CASE:List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 139 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict ):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
        if ".dwconv" in k:
            k_new = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
        if ".Proj." in k:
            k_new = k_new.replace(""".Proj.""" , """.proj.""" )
        if "patch_embed" in k_new:
            k_new = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
        if "network" in k_new:
            ls = k_new.split(""".""" )
            if ls[2].isdigit():
                k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
            else:
                k_new = k_new.replace("""network""" , """swiftformer.encoder.network""" )
        rename_keys.append((k, k_new) )
    return rename_keys
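# Illustrative trace of the renaming above on one hypothetical checkpoint key:
#   "network.0.0.pwconv1.weight"
#     .pwconv rule -> "network.0.0.point_wise_conv1.weight"
#     network rule -> "swiftformer.encoder.network.0.blocks.0.point_wise_conv1.weight"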
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
'''simple docstring'''
_a : Tuple = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
_a : Optional[int] = 1_0_0_0
_a : Optional[Any] = """huggingface/label-files"""
_a : Optional[Any] = """imagenet-1k-id2label.json"""
_a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Dict = idalabel
_a : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_a : Any = [3, 3, 6, 4]
_a : int = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
_a : Any = [3, 3, 9, 6]
_a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
_a : List[Any] = [4, 3, 1_0, 5]
_a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
_a : List[Any] = [4, 4, 1_2, 6]
_a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
_a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ )
else:
_a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" )
_a : int = checkpoint
_a : Optional[Any] = create_rename_keys(UpperCamelCase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
_a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval()
hf_model.load_state_dict(UpperCamelCase__ )
# prepare test inputs
_a : Any = prepare_img()
_a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
_a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" )
# compare outputs from both models
_a : Dict = get_expected_output(UpperCamelCase__ )
_a : int = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_snake_case = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 294 | 0 |
def is_ip_va_address_valid( ip_va_address: str ) -> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
a_ = input().strip()
a_ = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(F"{ip} is a {valid_or_invalid} IP v4 address.")
| 340 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['PerceiverFeatureExtractor']
_snake_case = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
"""simple docstring"""
def mf_knapsack( i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1 , wt , val , j )
        else:
            val_ = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val_
    return f[i][j]
def knapsack( w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution( w , wt , val ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples" )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                """All weights must be integers but got weight of """
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val, dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp , wt , i , j , optimal_set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 148 |
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner :
    def __init__( self , k : float , window_size : int ) -> None:
        if k in (0.0_4, 0.0_6):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
def __str__( self : Dict ) -> str:
return str(self.k )
    def detect( self , img_path : str ) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.0_4
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
_snake_case = HarrisCorner(0.04, 3)
_snake_case , _snake_case = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
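# Quick numeric illustration of the corner response computed above:
# r = det(M) - k * trace(M)^2 with M = [[wxx, wxy], [wxy, wyy]].
# For wxx = wyy = 4.0, wxy = 0.5 and k = 0.04 (illustrative values):
#   det = 4*4 - 0.25 = 15.75, trace^2 = 64, r = 15.75 - 2.56 = 13.19 > 0.5 -> corner.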
| 294 | 0 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments( path : Optional[int], n_shave_prefix_segments : int=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list : str, n_shave_prefix_segments : Dict=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''', '''norm1''' )
        new_item = new_item.replace('''in_layers.2''', '''conv1''' )
        new_item = new_item.replace('''out_layers.0''', '''norm2''' )
        new_item = new_item.replace('''out_layers.3''', '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''', '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''', '''conv_shortcut''' )
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths( old_list : Union[str, Any], n_shave_prefix_segments : Tuple=0 ):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''', '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''', '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''', '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''', '''proj_attn.bias''' )
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint( paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None ):
"""simple docstring"""
assert isinstance(UpperCamelCase__, UpperCamelCase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a = old_checkpoint[path]
_a = old_tensor.shape[0] // 3
_a = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_a = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a = old_tensor.split(channels // num_heads, dim=1 )
_a = query.reshape(UpperCamelCase__ )
_a = key.reshape(UpperCamelCase__ )
_a = value.reshape(UpperCamelCase__ )
for path in paths:
_a = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a = new_path.replace('''middle_block.0''', '''mid_block.resnets.0''' )
_a = new_path.replace('''middle_block.1''', '''mid_block.attentions.0''' )
_a = new_path.replace('''middle_block.2''', '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a = new_path.replace(replacement['''old'''], replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a = old_checkpoint[path["""old"""]][:, :, 0]
else:
_a = old_checkpoint[path["""old"""]]
def convert_ldm_checkpoint( checkpoint : Dict, config : Optional[Any] ):
    """simple docstring"""
    new_checkpoint = {}
_a = checkpoint["""time_embed.0.weight"""]
_a = checkpoint["""time_embed.0.bias"""]
_a = checkpoint["""time_embed.2.weight"""]
_a = checkpoint["""time_embed.2.bias"""]
_a = checkpoint["""input_blocks.0.0.weight"""]
_a = checkpoint["""input_blocks.0.0.bias"""]
_a = checkpoint["""out.0.weight"""]
_a = checkpoint["""out.0.bias"""]
_a = checkpoint["""out.2.weight"""]
_a = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
_a = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the middle blocks only
_a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
_a = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the output blocks only
_a = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
_a = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCamelCase__ )
}
for i in range(1, UpperCamelCase__ ):
_a = (i - 1) // (config["""num_res_blocks"""] + 1)
_a = (i - 1) % (config["""num_res_blocks"""] + 1)
_a = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
_a = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
_a = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
_a = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
_a = renew_resnet_paths(UpperCamelCase__ )
_a = {"""old""": f'input_blocks.{i}.0', """new""": f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
_a = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, additional_replacements=[meta_path, resnet_op], config=UpperCamelCase__ )
if len(UpperCamelCase__ ):
_a = renew_attention_paths(UpperCamelCase__ )
_a = {
"""old""": f'input_blocks.{i}.1',
"""new""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
_a = {
f'input_blocks.{i}.1.qkv.bias': {
"""key""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
"""query""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
"""value""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
"""key""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
"""query""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
"""value""": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, additional_replacements=[meta_path], attention_paths_to_split=UpperCamelCase__, config=UpperCamelCase__, )
_a = middle_blocks[0]
_a = middle_blocks[1]
_a = middle_blocks[2]
_a = renew_resnet_paths(UpperCamelCase__ )
assign_to_checkpoint(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, config=UpperCamelCase__ )
_a = renew_resnet_paths(UpperCamelCase__ )
assign_to_checkpoint(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, config=UpperCamelCase__ )
_a = renew_attention_paths(UpperCamelCase__ )
_a = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, attention_paths_to_split=UpperCamelCase__, config=UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
_a = i // (config["""num_res_blocks"""] + 1)
_a = i % (config["""num_res_blocks"""] + 1)
_a = [shave_segments(UpperCamelCase__, 2 ) for name in output_blocks[i]]
_a = {}
for layer in output_block_layers:
_a = layer.split('''.''' )[0], shave_segments(UpperCamelCase__, 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCamelCase__ )
else:
_a = [layer_name]
if len(UpperCamelCase__ ) > 1:
_a = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
_a = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
_a = renew_resnet_paths(UpperCamelCase__ )
_a = renew_resnet_paths(UpperCamelCase__ )
_a = {"""old""": f'output_blocks.{i}.0', """new""": f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, additional_replacements=[meta_path], config=UpperCamelCase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
_a = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
_a = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCamelCase__ ) == 2:
_a = []
if len(UpperCamelCase__ ):
_a = renew_attention_paths(UpperCamelCase__ )
_a = {
"""old""": f'output_blocks.{i}.1',
"""new""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
_a = {
f'output_blocks.{i}.1.qkv.bias': {
"""key""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
"""query""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
"""value""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
"""key""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
"""query""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
"""value""": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, additional_replacements=[meta_path], attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None, config=UpperCamelCase__, )
else:
_a = renew_resnet_paths(UpperCamelCase__, n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a = """.""".join(['''output_blocks''', str(UpperCamelCase__ ), path['''old''']] )
_a = """.""".join(['''up_blocks''', str(UpperCamelCase__ ), '''resnets''', str(UpperCamelCase__ ), path['''new''']] )
_a = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__snake_case = parser.parse_args()
__snake_case = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__snake_case = json.loads(f.read())
__snake_case = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__snake_case = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__snake_case = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__snake_case = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__snake_case = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path) | 320 |
"""simple docstring"""
def lowerCAmelCase__ ( equation1 , equation2 ):
    '''simple docstring'''
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError("""Please enter a valid equation.""" )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""" )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""" )
        else:
            raise ValueError("""No solution. (Inconsistent system)""" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
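# Hedged usage sketch of the solver above (kept under its obfuscated name):
# each equation is the triple (a, b, c) for a*x + b*y = c, so
# 2x - y = 4 and x + y = 5 intersect at (3.0, 2.0).
assert lowerCAmelCase__((2, -1, 4) , (1, 1, 5) ) == (3.0, 2.0)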
| 294 | 0 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class UpperCAmelCase_ ( snake_case_ ):
lowercase__ = '''mvp'''
lowercase__ = ['''past_key_values''']
lowercase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[str] , vocab_size : List[str]=50_267 , max_position_embeddings : Optional[Any]=1_024 , encoder_layers : Tuple=12 , encoder_ffn_dim : Optional[Any]=4_096 , encoder_attention_heads : int=16 , decoder_layers : Tuple=12 , decoder_ffn_dim : int=4_096 , decoder_attention_heads : List[Any]=16 , encoder_layerdrop : Tuple=0.0 , decoder_layerdrop : Tuple=0.0 , activation_function : Tuple="gelu" , d_model : Union[str, Any]=1_024 , dropout : List[str]=0.1 , attention_dropout : Any=0.0 , activation_dropout : Dict=0.0 , init_std : Tuple=0.02 , classifier_dropout : Tuple=0.0 , scale_embedding : Optional[Any]=False , use_cache : int=True , pad_token_id : Tuple=1 , bos_token_id : Dict=0 , eos_token_id : Union[str, Any]=2 , is_encoder_decoder : Optional[int]=True , decoder_start_token_id : Tuple=2 , forced_eos_token_id : Any=2 , use_prompt : Optional[Any]=False , prompt_length : Dict=100 , prompt_mid_dim : Union[str, Any]=800 , **kwargs : Dict , ) -> List[Any]:
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = classifier_dropout
A__ = use_cache
A__ = encoder_layers
A__ = scale_embedding # scale factor will be sqrt(d_model) if True
A__ = use_prompt
A__ = prompt_length
A__ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCAmelCase__ ):
A__ = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"The config can simply be saved and uploaded again to be fixed." )
| 247 |
"""simple docstring"""
_snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}]
_snake_case = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
snake_case_ = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case_ = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase__ ) , torch_builtin(UpperCAmelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase__ ) , gelu_new(UpperCAmelCase__ ) ) )
def lowerCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case_ = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case_ = get_activation("gelu" )
snake_case_ = get_activation("gelu_10" )
snake_case_ = torch_builtin(UpperCAmelCase__ )
snake_case_ = geluaa(UpperCAmelCase__ )
snake_case_ = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCAmelCase__ ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCAmelCase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(UpperCAmelCase__ ):
get_activation("bogus" )
with self.assertRaises(UpperCAmelCase__ ):
get_activation(UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
snake_case_ = get_activation("gelu" )
snake_case_ = 1
snake_case_ = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCAmelCase__ ):
snake_case_ = acta.a
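# Hedged illustration of the clipping behaviour tested above: "gelu_10" tracks
# gelu but is clipped to the range [-10, 10], so large inputs saturate.
#   import torch
#   from transformers.activations import get_activation
#   x = torch.tensor([-100.0, 0.0, 5.0, 100.0])
#   get_activation("gelu_10")(x)   # last entry is capped at 10.0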
| 159 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[str] , vocab_size : List[str]=50267 , max_position_embeddings : Optional[Any]=1024 , encoder_layers : Tuple=12 , encoder_ffn_dim : Optional[Any]=4096 , encoder_attention_heads : int=16 , decoder_layers : Tuple=12 , decoder_ffn_dim : int=4096 , decoder_attention_heads : List[Any]=16 , encoder_layerdrop : Tuple=0.0 , decoder_layerdrop : Tuple=0.0 , activation_function : Tuple="gelu" , d_model : Union[str, Any]=1024 , dropout : List[str]=0.1 , attention_dropout : Any=0.0 , activation_dropout : Dict=0.0 , init_std : Tuple=0.0_2 , classifier_dropout : Tuple=0.0 , scale_embedding : Optional[Any]=False , use_cache : int=True , pad_token_id : Tuple=1 , bos_token_id : Dict=0 , eos_token_id : Union[str, Any]=2 , is_encoder_decoder : Optional[int]=True , decoder_start_token_id : Tuple=2 , forced_eos_token_id : Any=2 , use_prompt : Optional[Any]=False , prompt_length : Dict=100 , prompt_mid_dim : Union[str, Any]=800 , **kwargs : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : List[str] = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 347 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
    def _lowercase ( self : Any , data_dir : Dict , mode : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
    def _lowercase ( self : Optional[Any] , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
    def _lowercase ( self : Optional[Any] , data_dir : List[Any] , mode : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
    def _lowercase ( self : Tuple , writer : TextIO , test_input_reader : TextIO , preds_list : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
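# Hedged sketch of the CoNLL-style file the NER reader above expects: one
# "token label" pair per line, sentences separated by blank lines, e.g.
#
#   SOCCER O
#   JAPAN B-LOC
#   GET O
#
#   Nadim B-PER
#   Ladki I-PER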
| 294 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def shape_list (tensor: Union[tf.Tensor, np.ndarray] ) -> List[int]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , np.ndarray ):
return list(tensor.shape )
lowercase_ = tf.shape(UpperCamelCase__ )
if tensor.shape == tf.TensorShape(UpperCamelCase__ ):
return dynamic
lowercase_ = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCamelCase__ )]
def stable_softmax (logits: tf.Tensor , axis: Optional[int] = None , name: Optional[str] = None ) -> tf.Tensor:
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCamelCase__ , name=UpperCamelCase__ )
def functional_layernorm (inputs , weight , bias , epsilon=1E-5 , axis=-1 ) -> Any:
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
lowercase_ = tf.nn.moments(UpperCamelCase__ , axes=[axis] , keepdims=UpperCamelCase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
lowercase_ = [1] * inputs.shape.rank
lowercase_ = shape_list(UpperCamelCase__ )[axis]
lowercase_ = tf.reshape(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tf.reshape(UpperCamelCase__ , UpperCamelCase__ )
# Compute layer normalization using the batch_normalization
# function.
lowercase_ = tf.nn.batch_normalization(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , offset=UpperCamelCase__ , scale=UpperCamelCase__ , variance_epsilon=UpperCamelCase__ , )
return outputs
def flatten (input , start_dim=0 , end_dim=-1 ) -> tf.Tensor:
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
lowercase_ = tf.shape(UpperCamelCase__ )
lowercase_ = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
lowercase_ = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCamelCase__ , UpperCamelCase__ )
def invert_attention_mask (encoder_attention_mask: tf.Tensor ) -> tf.Tensor:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , tf.Tensor ):
lowercase_ = tf.convert_to_tensor(UpperCamelCase__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
lowercase_ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
lowercase_ = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
lowercase_ = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "input_ids" ) -> Union[str, Any]:
'''simple docstring'''
tf.debugging.assert_less(
UpperCamelCase__ , tf.cast(UpperCamelCase__ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCamelCase__ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def save_attributes_to_hdf5_group (group , name , data ) -> None:
'''simple docstring'''
lowercase_ = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
lowercase_ = [x for x in data if len(UpperCamelCase__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
lowercase_ = np.asarray(UpperCamelCase__ )
lowercase_ = 1
lowercase_ = np.array_split(UpperCamelCase__ , UpperCamelCase__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
lowercase_ = np.array_split(UpperCamelCase__ , UpperCamelCase__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCamelCase__ ):
lowercase_ = chunk_data
else:
lowercase_ = data
def load_attributes_from_hdf5_group (group , name ) -> List[str]:
'''simple docstring'''
if name in group.attrs:
lowercase_ = [n.decode("""utf8""" ) if hasattr(UpperCamelCase__ , """decode""" ) else n for n in group.attrs[name]]
else:
lowercase_ = []
lowercase_ = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCamelCase__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def expand_1d (data ):
    '''simple docstring'''
    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , data )
| 136 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCamelCase :
    def __init__( self : List[Any] , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
_a : List[str] = claim_vector
_a : List[Any] = allocated_resources_table
_a : Union[str, Any] = maximum_claim_table
def _lowercase ( self : Tuple ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowercase ( self : int ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowercase ( self : List[str] ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]:
        return {self.__need().index(i ): i for i in self.__need()}
def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None:
_a : List[Any] = self.__need()
_a : Optional[int] = self.__allocated_resources_table
_a : str = self.__available_resources()
_a : Optional[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
_a : int = False
for each_need in need_list:
_a : Optional[int] = True
for index, need in enumerate(UpperCAmelCase__ ):
if need > available_resources[index]:
_a : List[Any] = False
break
if execution:
_a : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_a : Any = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(UpperCAmelCase__ )
# update available/freed resources stack
_a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _lowercase ( self : Any ) -> Optional[int]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 0 |
from __future__ import annotations
def A__ ( matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix)):
        for j in range(1 , len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
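    # Worked example (illustrative, not part of the original file): the cheapest
    # top-left to bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, total cost 7.
    assert A__([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7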
| 111 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')
class UpperCamelCase ( Generic[_T] ):
    def __init__( self , iterable : Iterable[_T] | None = None ) -> None:
        self._stacka : list[_T] = list(iterable or [] )
        self._stackb : list[_T] = []

    def __len__( self ) -> int:
        return len(self._stacka ) + len(self._stackb )

    def __repr__( self ) -> str:
        return f"""Queue({tuple(self._stackb[::-1] + self._stacka )})"""

    def put( self , item : _T ) -> None:
        # enqueue: push onto the input stack
        self._stacka.append(item )

    def get( self ) -> _T:
        # dequeue: refill the output stack from the input stack only when it is empty
        stacka_pop = self._stacka.pop
        stackb_append = self._stackb.append
        if not self._stackb:
            while self._stacka:
                stackb_append(stacka_pop() )
        if not self._stackb:
            raise IndexError("""Queue is empty""" )
        return self._stackb.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
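    # Minimal usage sketch (added for illustration; not in the original file):
    q = UpperCamelCase([10, 20] )
    q.put(30 )
    assert len(q ) == 3
    assert q.get() == 10  # FIFO order, even though the queue is built from two LIFO stacks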
| 294 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp( self ) -> None:
        super().setUp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase :str = """UNwant\u00E9d,running"""
UpperCamelCase :Union[str, Any] = """unwanted, running"""
return input_text, output_text
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :int = self.tokenizer_class(self.vocab_file )
UpperCamelCase :List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCAmelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :List[Any] = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Union[str, Any] = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Optional[int] = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[str] = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Tuple = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Tuple = BasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCamelCase :int = {}
for i, token in enumerate(UpperCAmelCase__ ):
UpperCamelCase :Optional[Any] = i
UpperCamelCase :Optional[int] = WordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :List[Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
UpperCamelCase :Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase :Optional[Any] = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
UpperCamelCase :str = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase :Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCAmelCase ( self ) -> Dict:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase ( self ) -> str:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase ( self ) -> Optional[int]:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
UpperCamelCase :Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
UpperCamelCase :Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
UpperCamelCase :List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
UpperCamelCase :int = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 259 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_snake_case = False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt="""first prompt""" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """cyberpunk 2077"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        image = pipe.image_variation(init_image , generator=generator , output_type="""numpy""" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 294 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class A ( PretrainedConfig ):
    model_type = '''glpn'''

    def __init__(self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
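# Usage sketch (illustrative, not part of the original file; requires `transformers`):
if __name__ == "__main__":
    cfg = A()  # the GLPN config class defined above
    print(cfg.model_type, cfg.hidden_sizes)  # -> glpn [32, 64, 160, 256]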
| 65 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}


class UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ) -> None:
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size( self ) -> int:
        return len(self.vocab )

    def get_vocab( self ) -> dict:
        return dict(self.vocab , **self.added_tokens_encoder )

    def _tokenize( self , text : str ) -> list:
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens

    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        return (vocab_file,)
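if __name__ == "__main__":
    # Illustrative smoke test (synthetic vocab written to a temp file; not part of the
    # original module):
    import tempfile

    with tempfile.NamedTemporaryFile("w" , suffix=".json" , delete=False , encoding="utf-8" ) as tmp:
        json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3} , tmp )
    tokenizer = UpperCamelCase(tmp.name )
    assert tokenizer._tokenize("ab" ) == ["a", "b"]  # character-level tokenization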
| 294 | 0 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A_ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
A_ = f'''https://www.google.com/search?q={query}&num=100'''
A_ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
A_ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
A_ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
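# Example invocation from a shell (illustrative; the script file name is whatever this
# snippet is saved as):
#   python <this_script>.py "how to format a date in python"
# With no arguments, the script prompts for the search query interactively.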
| 139 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)

    def get_scheduler_config( self , **kwargs ):
        config = {"""num_train_timesteps""": 1000}
        config.update(**kwargs )
        return config

    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained( self ):
        pass

    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample

    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )

    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )

    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )

    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2540529 ) < 10
| 294 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class lowercase__ ( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''nat'''

    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , )-> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
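# Usage sketch (illustrative, not part of the original file; requires `transformers`):
if __name__ == "__main__":
    cfg = lowercase__()  # the Nat config class defined above
    # hidden_size is the channel dimension after the last stage: embed_dim * 2 ** (num_stages - 1)
    assert cfg.hidden_size == 64 * 2 ** 3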
| 340 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}


class UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ) -> int:
        return len(self.sp_model )

    def get_vocab( self ) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , '"' ).replace("""''""" , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize( self , text : str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces

    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string

    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls

    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1, 1]
        return ([0] * len(token_ids_a )) + [1, 1]

    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def _decode( self , *args , **kwargs ):
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
        return text
| 294 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""

    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def test_speaker_embeddings( self ):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmp_npz = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(tmp_npz , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmp_npz )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )

    def test_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="max_length" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 148 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest ( nn.Module ):
    def __init__( self ) -> None:
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )

    def forward( self , x ):
        return self.linearb(self.batchnorm(self.lineara(x ) ) )


class PreForwardHook ( ModelHook ):
    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook ( ModelHook ):
    def post_forward( self , module , output ):
        return output + 1
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a : Any = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : int = torch.randn(2 , 3 )
_a : str = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_a : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Tuple = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Tuple ) -> List[str]:
_a : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : List[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : List[str] = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Any = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 294 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp( self ) -> None:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )

    def get_input_output_texts( self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    '''simple docstring'''
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float("""inf""" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count )
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""" )
    return distance
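
# Illustrative, non-interactive check (added for clarity; not part of the original script):
# edges 0 -> 1 (weight 2) and 1 -> 2 (weight 3) give distances [0.0, 2, 5] from vertex 0.
_demo_graph = [
    {"src": 0, "dst": 1, "weight": 2},
    {"src": 1, "dst": 2, "weight": 3},
]
assert bellman_ford(_demo_graph, 3, 2, 0) == [0.0, 2, 5]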
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}

    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 294 | 0 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE = " \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 247 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
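# Illustration only (not part of the original module): the _LazyModule pattern
# above keeps `import transformers` cheap by registering a lightweight stand-in
# in sys.modules; heavy submodules are imported on first attribute access.
# A minimal sketch of the same idea with importlib (names here are hypothetical):
import importlib
import types

class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)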
| 294 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Load the demo image used to verify the conversion."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Map original LAVIS weight names to their Transformers equivalents."""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val
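# Quick illustration of rename_key on an ordinary dict (hypothetical value):
_demo = {"visual_encoder.cls_token": 1}
rename_key(_demo, "visual_encoder.cls_token", "vision_model.embeddings.class_embedding")
assert _demo == {"vision_model.embeddings.class_embedding": 1}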
def read_in_q_v_bias(state_dict, config):
    """Rebuild the fused qkv bias from the original q and v biases."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict (k has no bias, so zeros are inserted)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    """Build the InstructBlipConfig (and target image size) for a given model name."""
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32_001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32_001).to_dict()
    else:
        raise ValueError("Model name not supported")
    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30_523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model weights into the Transformers design."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})
    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()
    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)
    image = load_demo_image()
    prompt = "What is unusual about this image?"
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer)
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)
    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)
    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")
    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)
    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs, do_sample=False, num_beams=5, max_length=256, min_length=1, top_p=0.9,
        repetition_penalty=1.5, length_penalty=1.0, temperature=1)
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 159 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant,
    radiation_density,
    matter_density,
    dark_energy,
    redshift,
):
    """Return the Hubble parameter H(z) for the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
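# The expression above is the Friedmann expansion rate,
#     H(z) = H0 * sqrt(Om_r*(1+z)^4 + Om_m*(1+z)^3 + Om_k*(1+z)^2 + Om_Lambda),
# with Om_k = 1 - (Om_m + Om_r + Om_Lambda). Illustrative check (added): for a
# flat universe the densities sum to one, so H(0) must equal H0.
assert abs(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    )
    - 68.3
) < 1e-9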
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 294 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")
class QueueByTwoStacks(Generic[_T]):
    """FIFO queue backed by two LIFO stacks."""

    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1 = list(iterable or [])
        self._stack2 = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Local references reduce attribute look-ups inside the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
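# FIFO behaviour in action (illustrative, added): items come out in insertion
# order even though two LIFO stacks back the structure; each element is moved
# between stacks at most once, giving amortised O(1) per operation.
_q = QueueByTwoStacks([1, 2, 3])
_q.put(4)
assert [_q.get(), _q.get(), _q.get(), _q.get()] == [1, 2, 3, 4]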
if __name__ == "__main__":
from doctest import testmod
testmod()
| 347 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf")
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt")
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
| 294 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint to a PyTorch state dict."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
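# Typical invocation of this script (hypothetical paths, for illustration only):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin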
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 136 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
    """Rename original RWKV weight names to the Transformers layout."""
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
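# Example of the renaming rules above on a single hypothetical key:
# "blocks.3.att.key.weight" becomes "rwkv.blocks.3.attention.key.weight".
assert convert_state_dict({"blocks.3.att.key.weight": 0}) == {
    "rwkv.blocks.3.attention.key.weight": 0
}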
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download an RWKV checkpoint, convert it and save it in sharded HF format."""
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model.")
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 294 | 0 |
def solution(limit=28_123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
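# Background (added note): a number is "abundant" when its proper divisors sum
# to more than the number itself; 12 is the smallest (1+2+3+4+6 = 16 > 12), so
# the smallest sum of two abundant numbers is 24. The 28123 default comes from
# the classical bound above which every integer is such a sum (Project Euler 23).
# Illustrative check: below 24 no number is a sum of two abundants.
assert solution(20) == sum(range(1, 21))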
if __name__ == "__main__":
print(solution())
| 111 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match")
| 294 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights", action="store_true", help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Configure model quantizers before calibration or training/evaluation."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Share one scale factor across q, k and v so QKV can run as a single fused GEMM."""
    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")
    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning the per-tensor amax to each channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every weighted layer."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set an attribute on mod's quantizer submodule."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a regex in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
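# Hedged usage sketch (added; assumes an argparse parser and an already-built
# model, and `build_model` is a hypothetical helper):
#
#     parser = argparse.ArgumentParser()
#     add_arguments(parser)
#     args = parser.parse_args(["--quant-per-tensor"])
#     set_default_quantizers(args)   # must run before model construction
#     model = build_model(args)
#     configure_model(model, args, calib=False)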
| 259 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning,
        )
        super().__init__(*args, **kwargs)
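# Descriptive note (added): the class above is a pure deprecation shim. It
# inherits all behaviour from CLIPImageProcessor and only emits a FutureWarning
# at construction time, so existing `CLIPFeatureExtractor(...)` call sites keep
# working during the transition.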
| 294 | 0 |
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Put what was the last node at the head
        self.head = prev
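# Note on costs (added): insert_head is O(1) via insert_nth(0, ...), but
# insert_tail, delete_tail and indexing are O(n) because a singly linked list
# is walked from the head each time; len(self) is itself an O(n) traversal here.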
def test_singly_linked_list() -> None:
    '''simple docstring'''
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    '''simple docstring'''
    test_input = [
        -9,
        100,
        Node(77_345_112),
        "dlrow olleH",
        7,
        5_555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    '''simple docstring'''
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
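# Quick usage sketch (not part of the original module): a round trip through
# the API above, mirroring what test_singly_linked_list exercises.
def _usage_sketch() -> None:
    lst = LinkedList()
    for value in (1, 2, 3):
        lst.insert_tail(value)  # builds 1->2->3
    lst.reverse()  # now 3->2->1
    assert str(lst) == "3->2->1"
    assert lst.delete_head() == 3  # list is left as 2->1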
| 65 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    '''simple docstring'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
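# Sketch (not in the original file): the pseudo_inv argument exists so a caller
# can supply a pseudo-inverse when A is singular; np.linalg.pinv is one option.
def _pseudo_inverse_example():
    a = np.array([[1.0, 2.0], [2.0, 4.0]])  # rank-deficient, np.linalg.inv would raise
    b = np.array([[1.0], [0.0]])
    c = np.array([[3.0]])
    return schur_complement(a, b, c, pseudo_inv=np.linalg.pinv(a))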
| 294 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class _snake_case ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_model(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 139 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = torch.device('cpu')
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    '''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] )
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
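# Sketch (not in the original script): the two helpers above applied to a toy
# state dict; the key names here are made up purely for illustration.
def _rename_demo() -> dict:
    toy = {"patch_embed.weight": 1, "blocks.pwconv.weight": 2}
    for src, dest in create_rename_keys(toy):
        rename_key(toy, src, dest)
    return toy  # keys now follow the HF naming, values are untouched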
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    '''simple docstring'''
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 294 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 340 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
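# Sketch (not part of this module): a plain package can get a similar lazy
# effect with PEP 562's module-level __getattr__; the names below are examples.
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "PerceiverModel":
#             module = importlib.import_module(".modeling_perceiver", __name__)
#             return module.PerceiverModel
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")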
| 294 | 0 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        """simple docstring"""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ], )
    @require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ], )
    @require_tf
    def test_small_model_tf(self):
        """simple docstring"""
        pass
| 148 |
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__(self) -> str:
        return str(self.k)
    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
_snake_case = HarrisCorner(0.04, 3)
_snake_case , _snake_case = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
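# Sketch (not part of the original file): the double loop above can be
# vectorised with an integral image; this computes the same Harris response
# map in a few array operations. Function and variable names are my own.
def harris_response(img, k=0.04, window=3):
    dy, dx = np.gradient(img.astype(float))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    def box_sum(a, w):
        # summed-area table: s[i, j] = sum of a[:i, :j]
        s = np.pad(a, ((1, 0), (1, 0))).cumsum(0).cumsum(1)
        return s[w:, w:] - s[:-w, w:] - s[w:, :-w] + s[:-w, :-w]
    wxx, wyy, wxy = (box_sum(m, window) for m in (ixx, iyy, ixy))
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2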
| 294 | 0 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 131072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """simple docstring"""
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """simple docstring"""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
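# Sanity check (not in the original script): the crash schedule maps the ends
# of [0, 1] to themselves, since atan2(0, 1) = 0 and atan2(1, 0) = pi / 2.
def _crash_schedule_endpoints():
    out = get_crash_schedule(torch.tensor([0.0, 1.0]))
    return float(out[0]), float(out[1])  # (0.0, 1.0)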
class Object(object):
    '''simple docstring'''
    pass
class DiffusionUncond(nn.Module):
    '''simple docstring'''
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """simple docstring"""
    url = MODELS_MAP[model_name]["url"]
    os.system(f'wget {url} ./')
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    """simple docstring"""
    if name.startswith('skip'):
        return name.replace('skip', RES_CONV_MAP['skip'])
    # name has to be of format main.{digit}
    if not name.startswith('main.'):
        raise ValueError(f'ResConvBlock error with {name}')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """simple docstring"""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f'Attn error with {name}')
def rename(input_string, max_depth=13):
    """simple docstring"""
    string = input_string
    if string.split('.')[0] == "timestep_embed":
        return string.replace('timestep_embed', 'time_proj')
    depth = 0
    if string.startswith('net.3.'):
        depth += 1
        string = string[6:]
    elif string.startswith('net.'):
        string = string[4:]
    while string.startswith('main.7.'):
        depth += 1
        string = string[7:]
    if string.startswith('main.'):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith('.'):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.')
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """simple docstring"""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel'):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """simple docstring"""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """simple docstring"""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_name = args.model_path.split('/')[-1].split('.')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['state_dict'])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('kernel') for k in list(diffusers_minus_renamed)), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print('Diff sum', diff_sum)
    print('Diff max', diff_max)
    assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    main(args)
| 320 |
"""simple docstring"""
def cramers_rule_2x2(equation1, equation2):
    '''simple docstring'''
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
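# Worked example (not in the original file): each equation is passed as
# [a, b, c] for a*x + b*y = c. For 2x + 3y = 8 and x - y = -1:
#   determinant   = 2*(-1) - 1*3    = -5
#   determinant_x = 8*(-1) - (-1)*3 = -5   ->  x = 1.0
#   determinant_y = 2*(-1) - 1*8    = -10  ->  y = 2.0
def _cramer_example():
    return cramers_rule_2x2([2, 3, 8], [1, -1, -1])  # (1.0, 2.0)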
| 294 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key):
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext, key):
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext, key):
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
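# Round-trip sketch (not in the original file): decoding an encoded message
# returns the prepared (uppercased, X-padded) plaintext; key/message arbitrary.
def _playfair_roundtrip() -> bool:
    key = "playfair example"
    message = "hide the gold"
    prepared = prepare_input(message)  # "HIDETHEGOLDX", padded to even length
    return decode(encode(message, key), key) == prepared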
| 247 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
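# Example (not in the original file): 169 lies on the classic 3-cycle
# 169 -> 1! + 6! + 9! = 363601 -> 1454 -> 169, so applying
# digit_factorial_sum three times returns to the start.
def _example_169_cycle() -> bool:
    n = 169
    for _ in range(3):
        n = digit_factorial_sum(n)
    return n == 169  # True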
| 159 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed.")
| 294 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def _a ( _SCREAMING_SNAKE_CASE = "dhaka" , _SCREAMING_SNAKE_CASE = 5 ) -> Optional[int]:
snake_case_ = min(UpperCamelCase__ , 50 ) # Prevent abuse!
snake_case_ = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
snake_case_ = requests.get("""https://www.google.com/search""" , params=UpperCamelCase__ , headers=UpperCamelCase__ )
snake_case_ = BeautifulSoup(html.text , """html.parser""" )
snake_case_ = """""".join(
re.findall(r"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
snake_case_ = json.dumps(UpperCamelCase__ )
snake_case_ = json.loads(UpperCamelCase__ )
snake_case_ = re.findall(
r"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , UpperCamelCase__ , )
if not matched_google_image_data:
return 0
snake_case_ = re.sub(
r"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(UpperCamelCase__ ) , )
snake_case_ = re.findall(
r"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , UpperCamelCase__ , )
for index, fixed_full_res_image in enumerate(UpperCamelCase__ ):
if index >= max_images:
return index
snake_case_ = bytes(UpperCamelCase__ , """ascii""" ).decode(
"""unicode-escape""" )
snake_case_ = bytes(UpperCamelCase__ , """ascii""" ).decode(
"""unicode-escape""" )
snake_case_ = urllib.request.build_opener()
snake_case_ = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(UpperCamelCase__ )
snake_case_ = f"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
urllib.request.urlretrieve( # noqa: S310
UpperCamelCase__ , f"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 347 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple:
# in NER datasets, the last column is usually reserved for NER label
_a : Optional[int] = label_idx
def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : Any = mode.value
_a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : int = 1
_a : int = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
_a : str = []
_a : str = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
_a : List[str] = []
_a : str = []
else:
_a : List[Any] = line.split(""" """ )
words.append(splits[0] )
if len(UpperCAmelCase__ ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
return examples
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]:
_a : List[str] = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(UpperCAmelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(UpperCAmelCase__ )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : List[Any] = f.read().splitlines()
if "O" not in labels:
_a : Union[str, Any] = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCamelCase ( snake_case_ ):
def __init__( self : Union[str, Any] ) -> List[str]:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
_a : Optional[int] = f.read().splitlines()
if "O" not in labels:
_a : Optional[Any] = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_a : List[Any] = mode.value
_a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" )
_a : List[str] = 1
_a : Optional[Any] = []
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f:
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[Any] = []
_a : Any = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) )
guid_index += 1
return examples
def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict:
_a : Optional[Any] = 0
for sentence in parse_incr(UpperCAmelCase__ ):
_a : List[str] = preds_list[example_id]
_a : str = """"""
for token in sentence:
out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCAmelCase__ )
example_id += 1
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
if path:
with open(UpperCAmelCase__ , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 294 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        """simple docstring"""
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)
    def preprocess_img(self, images):
        """simple docstring"""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images
    def __call__(self, text=None, images=None, **kwargs):
        """simple docstring"""
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        """simple docstring"""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : int=True):
"""simple docstring"""
lowercase_ = []
if output_path is None:
lowercase_ = """./animation.gif"""
if input_path is None:
lowercase_ = self.save_path
lowercase_ = sorted(glob(input_path + """/*"""))
if not len(UpperCAmelCase__):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""")
if len(UpperCAmelCase__) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""")
lowercase_ = total_duration / len(UpperCAmelCase__)
lowercase_ = [frame_duration] * len(UpperCAmelCase__)
if extend_frames:
lowercase_ = 1.5
lowercase_ = 3
for file_name in paths:
if file_name.endswith(""".png"""):
images.append(imageio.imread(UpperCAmelCase__))
imageio.mimsave(UpperCAmelCase__ , UpperCAmelCase__ , duration=UpperCAmelCase__)
print(F'''gif saved to {output_path}''')
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Tuple=None):
"""simple docstring"""
if not (path or img):
raise ValueError("""Input either path or tensor""")
if img is not None:
raise NotImplementedError
lowercase_ = preprocess(Image.open(UpperCAmelCase__) , target_image_size=2_5_6).to(self.device)
lowercase_ = preprocess_vqgan(UpperCAmelCase__)
lowercase_ = self.vqgan.encode(UpperCAmelCase__)
return z
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase_ : Dict):
"""simple docstring"""
lowercase_ = self.latent.detach().requires_grad_()
lowercase_ = base_latent + transform_vector
if self.quantize:
lowercase_ = self.vqgan.quantize(UpperCAmelCase__)
else:
lowercase_ = trans_latent
return self.vqgan.decode(UpperCAmelCase__)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None):
"""simple docstring"""
lowercase_ = self.clip_preprocessor(text=UpperCAmelCase__ , images=UpperCAmelCase__ , return_tensors="""pt""" , padding=UpperCAmelCase__)
lowercase_ = self.clip(**UpperCAmelCase__)
lowercase_ = clip_outputs.logits_per_image
if weights is not None:
lowercase_ = similarity_logits * weights
return similarity_logits.sum()
def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = self._get_clip_similarity(pos_prompts["""prompts"""] , UpperCAmelCase__ , weights=(1 / pos_prompts["""weights"""]))
if neg_prompts:
lowercase_ = self._get_clip_similarity(neg_prompts["""prompts"""] , UpperCAmelCase__ , weights=neg_prompts["""weights"""])
else:
lowercase_ = torch.tensor([1] , device=self.device)
lowercase_ = -torch.log(UpperCAmelCase__) + torch.log(UpperCAmelCase__)
return loss
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
lowercase_ = torch.randn_like(self.latent , requires_grad=UpperCAmelCase__ , device=self.device)
lowercase_ = torch.optim.Adam([vector] , lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
lowercase_ = self._add_vector(UpperCAmelCase__)
lowercase_ = loop_post_process(UpperCAmelCase__)
lowercase_ = self._get_CLIP_loss(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
print("""CLIP loss""" , UpperCAmelCase__)
if self.log:
wandb.log({"""CLIP Loss""": clip_loss})
clip_loss.backward(retain_graph=UpperCAmelCase__)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]):
"""simple docstring"""
wandb.init(reinit=UpperCAmelCase__ , project="""face-editor""")
wandb.config.update({"""Positive Prompts""": positive_prompts})
wandb.config.update({"""Negative Prompts""": negative_prompts})
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations})
if image_path:
lowercase_ = Image.open(UpperCAmelCase__)
lowercase_ = image.resize((2_5_6, 2_5_6))
wandb.log("""Original Image""" , wandb.Image(UpperCAmelCase__))
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Union[str, Any]):
"""simple docstring"""
if not prompts:
return []
lowercase_ = []
lowercase_ = []
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
lowercase_ = [prompt.strip() for prompt in prompts.split("""|""")]
for prompt in prompts:
if isinstance(UpperCAmelCase__ , (tuple, list)):
lowercase_ = prompt[0]
lowercase_ = float(prompt[1])
elif ":" in prompt:
lowercase_ = prompt.split(""":""")
lowercase_ = float(UpperCAmelCase__)
else:
lowercase_ = prompt
lowercase_ = 1.0
processed_prompts.append(UpperCAmelCase__)
weights.append(UpperCAmelCase__)
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCAmelCase__ , device=self.device),
}
def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[int]=None , ):
"""simple docstring"""
if image_path:
lowercase_ = self._get_latent(UpperCAmelCase__)
else:
lowercase_ = torch.randn(self.latent_dim , device=self.device)
if self.log:
self._init_logging(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
assert pos_prompts, "You must provide at least one positive prompt."
lowercase_ = self.process_prompts(UpperCAmelCase__)
lowercase_ = self.process_prompts(UpperCAmelCase__)
if save_final and save_path is None:
lowercase_ = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""]))
if not os.path.exists(UpperCAmelCase__):
os.makedirs(UpperCAmelCase__)
else:
lowercase_ = save_path + """_""" + get_timestamp()
os.makedirs(UpperCAmelCase__)
lowercase_ = save_path
lowercase_ = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print("""Original Image""")
show_pil(custom_to_pil(UpperCAmelCase__))
lowercase_ = loop_post_process(UpperCAmelCase__)
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)):
if show_intermediate:
show_pil(UpperCAmelCase__)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png'''))
if self.log:
wandb.log({"""Image""": wandb.Image(UpperCAmelCase__)})
if show_final:
show_pil(UpperCAmelCase__)
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png'''))
| 136 |
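The process_prompts method above parses a pipe-delimited prompt string into (prompt, weight) pairs, with an optional ":weight" suffix per prompt. A minimal sketch of that parsing, without the tensor conversion (parse_prompts is an illustrative name, not from the original):

def parse_prompts(prompts):
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    processed, weights = [], []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight = prompt.split(":")
            weight = float(weight)
        else:
            text, weight = prompt, 1.0
        processed.append(text)
        weights.append(weight)
    return processed, weights

assert parse_prompts("a dog:2|a cat") == (["a dog", "a cat"], [2.0, 1.0])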
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCamelCase :
def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None:
_a : List[str] = claim_vector
_a : List[Any] = allocated_resources_table
_a : Union[str, Any] = maximum_claim_table
def _lowercase ( self : Tuple ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _lowercase ( self : int ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _lowercase ( self : List[str] ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]:
return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()}
def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None:
_a : List[Any] = self.__need()
_a : Optional[int] = self.__allocated_resources_table
_a : str = self.__available_resources()
_a : Optional[Any] = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
_a : int = False
for each_need in need_list:
_a : Optional[int] = True
for index, need in enumerate(UpperCAmelCase__ ):
if need > available_resources[index]:
_a : List[Any] = False
break
if execution:
_a : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_a : Any = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(UpperCAmelCase__ )
# update available/freed resources stack
_a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _lowercase ( self : Any ) -> Optional[int]:
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}"""
+ """ """.join(f"""{it:>8}""" for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 0 |
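The class above runs the Banker's algorithm: a state is safe if every process can eventually acquire its remaining need and release its allocation. A self-contained sketch of the core safety check, applied to the tables defined above (is_safe and its variable names are illustrative, not the original API):

import numpy as np

def is_safe(claim_vector, allocated, maximum):
    available = np.array(claim_vector) - np.array(allocated).sum(axis=0)
    need = [np.array(m) - np.array(a) for m, a in zip(maximum, allocated)]
    pending = list(range(len(need)))
    while pending:
        progressed = False
        for p in list(pending):
            if all(need[p] <= available):                       # p can run to completion
                available = available + np.array(allocated[p])  # p frees its resources
                pending.remove(p)
                progressed = True
        if not progressed:
            return False                                        # no process can finish: unsafe
    return True

assert is_safe(
    [8, 5, 9, 7],
    [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
    [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]],
)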
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__UpperCAmelCase : Dict = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Dict = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 |
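The module above defers heavy torch/flax imports through transformers' internal _LazyModule. The same effect can be had in plain Python with a module-level __getattr__ (PEP 562); the package layout and names below are hypothetical:

# my_package/__init__.py
import importlib

_LAZY_ATTRS = {"HeavyModel": ".modeling", "HeavyConfig": ".configuration"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        # import the submodule only on first attribute access
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")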
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_snake_case = TypeVar('_T')
class UpperCamelCase ( Generic[_T] ):
def __init__( self : Optional[int] , UpperCAmelCase__ : Iterable[_T] | None = None ) -> None:
_a : list[_T] = list(iterable or [] )
_a : list[_T] = []
def __len__( self : str ) -> int:
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[str] ) -> str:
return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : _T ) -> None:
self._stacka.append(UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) -> _T:
_a : Any = self._stacka.pop
_a : Union[str, Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 294 | 0 |
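The class above is a FIFO queue built from two stacks: pushes go to an inbox, and a pop lazily reverses the inbox into an outbox, so each element is moved at most once (amortized O(1)). A de-obfuscated sketch with explicit method names (put/get are my labels, not the original names):

class TwoStackQueue:
    def __init__(self, iterable=None):
        self._inbox = list(iterable or [])
        self._outbox = []

    def put(self, item):
        self._inbox.append(item)

    def get(self):
        if not self._outbox:          # refill only when the outbox runs dry
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()

q = TwoStackQueue([1, 2])
q.put(3)
assert [q.get(), q.get(), q.get()] == [1, 2, 3]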
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Check if the input is valid
if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3:
raise ValueError('''Please enter a valid equation.''' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('''Both a & b of two equations can\'t be zero.''' )
# Extract the coefficients
UpperCamelCase :Any = equationa
UpperCamelCase :Tuple = equationa
# Calculate the determinants of the matrices
UpperCamelCase :int = aa * ba - aa * ba
UpperCamelCase :str = ca * ba - ca * ba
UpperCamelCase :str = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('''Infinite solutions. (Consistent system)''' )
else:
raise ValueError('''No solution. (Inconsistent system)''' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCamelCase :Dict = determinant_x / determinant
UpperCamelCase :str = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 259 |
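A worked example of the Cramer's-rule solver above: each equation a*x + b*y = c is passed as [a, b, c], and the solution is (det_x / det, det_y / det):

# Solve 2x + 3y = 6 and 4x + 9y = 15; expected (x, y) = (1.5, 1.0)
a1, b1, c1 = 2, 3, 6
a2, b2, c2 = 4, 9, 15
det = a1 * b2 - a2 * b1        # 18 - 12 = 6
det_x = c1 * b2 - c2 * b1      # 54 - 45 = 9
det_y = a1 * c2 - a2 * c1      # 30 - 24 = 6
assert (det_x / det, det_y / det) == (1.5, 1.0)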
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_snake_case = False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Tuple ) -> List[Any]:
_a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Optional[Any] = torch.manual_seed(0 )
_a : Union[str, Any] = pipe.dual_guided(
prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
_a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[Any] = generator.manual_seed(0 )
_a : str = pipe.dual_guided(
prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = """cyberpunk 2077"""
_a : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_a : Tuple = torch.manual_seed(0 )
_a : Any = pipe.dual_guided(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_a : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_a : int = """A painting of a squirrel eating a burger """
_a : Tuple = torch.manual_seed(0 )
_a : Union[str, Any] = pipe.text_to_image(
prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
_a : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 294 | 0 |
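Each assertion in the tests above reduces to "the max absolute pixel difference stays under a tolerance" (1e-5 for reproducibility checks, 1e-1 against reference slices). The check in isolation:

import numpy as np

def images_close(a, b, atol):
    # largest elementwise deviation must stay under the tolerance
    return np.abs(np.asarray(a) - np.asarray(b)).max() < atol

rng = np.random.default_rng(0)
img = rng.random((1, 512, 512, 3))
assert images_close(img, img + 1e-6, atol=1e-5)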
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase_ ( __A ) -> Optional[int]:
'''simple docstring'''
return EnvironmentCommand()
class A ( snake_case_ ):
@staticmethod
def lowercase_ (__UpperCAmelCase : ArgumentParser ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = parser.add_parser("env" )
download_parser.set_defaults(func=UpperCAmelCase__ )
def lowercase_ (self : str ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = huggingface_hub.__version__
UpperCAmelCase__ = """not installed"""
UpperCAmelCase__ = """NA"""
if is_torch_available():
import torch
UpperCAmelCase__ = torch.__version__
UpperCAmelCase__ = torch.cuda.is_available()
UpperCAmelCase__ = """not installed"""
if is_transformers_available():
import transformers
UpperCAmelCase__ = transformers.__version__
UpperCAmelCase__ = """not installed"""
if is_accelerate_available():
import accelerate
UpperCAmelCase__ = accelerate.__version__
UpperCAmelCase__ = """not installed"""
if is_xformers_available():
import xformers
UpperCAmelCase__ = xformers.__version__
UpperCAmelCase__ = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": f"""{pt_version} ({pt_cuda_available})""",
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(UpperCAmelCase__ ) )
return info
@staticmethod
def lowercase_ (__UpperCAmelCase : int ) -> int:
"""simple docstring"""
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 65 |
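The command above builds its report from a chain of is_*_available() guards. A framework-agnostic sketch of the same pattern, where each optional dependency contributes a version only if it imports cleanly:

import importlib
import platform

def collect_env(optional=("torch", "transformers", "accelerate", "xformers")):
    info = {"Platform": platform.platform(), "Python version": platform.python_version()}
    for name in optional:
        try:
            info[f"{name} version"] = importlib.import_module(name).__version__
        except ImportError:
            info[f"{name} version"] = "not installed"
    return "\n".join(f"- {key}: {val}" for key, val in info.items())

print(collect_env())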
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'vocab.json'}
_snake_case = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_snake_case = {'mgp-str': 27}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : List[str] = VOCAB_FILES_NAMES
UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]="[GO]" , UpperCAmelCase__ : Tuple="[GO]" , UpperCAmelCase__ : Optional[int]="[s]" , UpperCAmelCase__ : int="[GO]" , **UpperCAmelCase__ : Dict ) -> int:
super().__init__(
unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
with open(UpperCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
_a : int = json.load(UpperCAmelCase__ )
_a : Optional[int] = {v: k for k, v in self.vocab.items()}
@property
def _lowercase ( self : Dict ) -> Union[str, Any]:
return len(self.vocab )
def _lowercase ( self : Union[str, Any] ) -> str:
return dict(self.vocab , **self.added_tokens_encoder )
def _lowercase ( self : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]:
_a : Tuple = []
for s in text:
char_tokens.extend(UpperCAmelCase__ )
return char_tokens
def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> Dict:
return self.vocab.get(UpperCAmelCase__ , self.vocab.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]:
return self.decoder.get(UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCAmelCase__ ) )
return
_a : Tuple = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + """\n""" )
return (vocab_file,)
| 294 | 0 |
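The tokenizer above is character-level: _tokenize collects individual characters and the vocab maps each character to an id, with the decoder dict inverting the mapping. The encode/decode round trip, reduced to plain dicts (the tiny vocab here is illustrative):

vocab = {"[GO]": 0, "h": 1, "i": 2}
decoder = {v: k for k, v in vocab.items()}

tokens = list("hi")                                   # character-level tokenization
ids = [vocab.get(t, vocab["[GO]"]) for t in tokens]   # unknown chars fall back to [GO]
assert ids == [1, 2]
assert "".join(decoder[i] for i in ids) == "hi"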
'''simple docstring'''
import baseaa
def A_ ( snake_case ):
return baseaa.aaaencode(string.encode("utf-8" ) )
def A_ ( snake_case ):
return baseaa.aaadecode(UpperCamelCase__ ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 |
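Under the document's digit-collapsing renames, baseaa.aaaencode and baseaa.aaadecode correspond to base64.a85encode and base64.a85decode (Ascii85, in the standard library since Python 3.4). A round trip:

import base64

encoded = base64.a85encode("some text".encode("utf-8"))
decoded = base64.a85decode(encoded).decode("utf-8")
assert decoded == "some text"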
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = (IPNDMScheduler,)
UpperCamelCase : int = (('''num_inference_steps''', 50),)
def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int:
_a : Optional[int] = {"""num_train_timesteps""": 1000}
config.update(**UpperCAmelCase__ )
return config
def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
_a : Optional[int] = dict(self.forward_default_kwargs )
_a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : Union[str, Any] = 0.1 * sample
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ )
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : Tuple ) -> List[str]:
pass
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
_a : Optional[Any] = dict(self.forward_default_kwargs )
_a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
_a : Optional[Any] = self.dummy_sample
_a : List[Any] = 0.1 * sample
_a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_a : Union[str, Any] = self.get_scheduler_config()
_a : Optional[Any] = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
_a : Any = dummy_past_residuals[:]
if time_step is None:
_a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase__ )
_a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
_a : Optional[Any] = dummy_past_residuals[:]
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]:
_a : Optional[int] = self.scheduler_classes[0]
_a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ )
_a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ )
_a : int = 10
_a : List[Any] = self.dummy_model()
_a : str = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
_a : str = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
return sample
def _lowercase ( self : int ) -> str:
_a : Dict = dict(self.forward_default_kwargs )
_a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ )
for scheduler_class in self.scheduler_classes:
_a : Optional[int] = self.get_scheduler_config()
_a : Tuple = scheduler_class(**UpperCAmelCase__ )
_a : Tuple = self.dummy_sample
_a : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCAmelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ):
_a : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_a : Optional[Any] = dummy_past_residuals[:]
_a : Optional[Any] = scheduler.timesteps[5]
_a : str = scheduler.timesteps[6]
_a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
_a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : List[str] ) -> List[str]:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : List[str] ) -> List[str]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ )
def _lowercase ( self : int ) -> List[Any]:
_a : str = self.full_loop()
_a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 294 | 0 |
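The recurring pattern in the scheduler tests above is a save/reload round trip followed by an output-equality assertion. Its skeleton, stripped of the diffusers specifics (TinyScheduler is a stand-in, not a real class):

import json
import tempfile
from pathlib import Path

class TinyScheduler:
    def __init__(self, num_train_timesteps=1000):
        self.num_train_timesteps = num_train_timesteps

    def save_config(self, directory):
        # mimic save_config: persist the constructor arguments as JSON
        Path(directory, "config.json").write_text(
            json.dumps({"num_train_timesteps": self.num_train_timesteps}))

    @classmethod
    def from_pretrained(cls, directory):
        return cls(**json.loads(Path(directory, "config.json").read_text()))

    def step(self, t):
        return t / self.num_train_timesteps

with tempfile.TemporaryDirectory() as tmpdir:
    sched = TinyScheduler(500)
    sched.save_config(tmpdir)
    reloaded = TinyScheduler.from_pretrained(tmpdir)
    assert sched.step(50) == reloaded.step(50)  # outputs must be identical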
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'vocab_file': 'spiece.model'}
_snake_case = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class UpperCamelCase ( snake_case_ ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None:
_a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
_a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
_a : Optional[Any] = 3
_a : Tuple = do_lower_case
_a : Tuple = remove_space
_a : Tuple = keep_accents
_a : Tuple = vocab_file
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_a : int = jieba
_a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowercase ( self : Optional[Any] ) -> Any:
return len(self.sp_model )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ) -> List[str]:
_a : Tuple = self.__dict__.copy()
_a : Tuple = None
return state
def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict:
_a : Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_a : Tuple = {}
_a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict:
if self.remove_space:
_a : Optional[int] = """ """.join(inputs.strip().split() )
else:
_a : List[Any] = inputs
_a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ )
_a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] )
if self.do_lower_case:
_a : Union[str, Any] = outputs.lower()
return outputs
def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
_a : str = self.preprocess_text(UpperCAmelCase__ )
_a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
_a : Union[str, Any] = []
for piece in pieces:
if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_a : Dict = cur_pieces[1:]
else:
_a : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase__ )
else:
new_pieces.append(UpperCAmelCase__ )
return new_pieces
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int:
return self.sp_model.PieceToId(UpperCAmelCase__ )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
return self.sp_model.IdToPiece(UpperCAmelCase__ )
def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict:
_a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip()
return out_string
def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Optional[Any] = [self.sep_token_id]
_a : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1]
return ([0] * len(UpperCAmelCase__ )) + [1, 1]
def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
_a : Any = [self.sep_token_id]
_a : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Union[str, Any] = os.path.join(
UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , """wb""" ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]:
_a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
_a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 294 | 0 |
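The tokenizer above maps spaces and newlines to the printable placeholders \u2582/\u2583 before SentencePiece sees the text (the str.maketrans table built in __init__) and undoes the mapping in _decode. The trick in isolation:

table = str.maketrans(" \n", "\u2582\u2583")
encoded = "round trip\nthrough placeholders".translate(table)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "round trip\nthrough placeholders"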
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class lowerCamelCase__ :
a__ : float
a__ : TreeNode | None = None
a__ : TreeNode | None = None
def UpperCamelCase__ ( lowercase__ : Optional[int] ):
# Validation
def is_valid_tree(lowercase__ : Dict ) -> bool:
if node is None:
return True
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(UpperCamelCase__ ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
def is_binary_search_tree_recursive_check(
lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Optional[Any] ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , UpperCamelCase__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , UpperCamelCase__ )
)
return is_binary_search_tree_recursive_check(UpperCamelCase__ , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148 |
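A runnable instance of the bounds-propagating BST check above, with an explicit node type (this TreeNode is a minimal stand-in mirroring the data/left/right fields the validator reads):

from __future__ import annotations
from dataclasses import dataclass

@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None

def is_bst(node, low=float("-inf"), high=float("inf")):
    if node is None:
        return True
    return (
        low < node.data < high
        and is_bst(node.left, low, node.data)
        and is_bst(node.right, node.data, high)
    )

assert is_bst(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
assert not is_bst(TreeNode(2.0, TreeNode(5.0)))  # left child exceeds its upper bound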
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCamelCase ( nn.Module ):
def __init__( self : Union[str, Any] ) -> int:
super().__init__()
_a : Optional[Any] = nn.Linear(3 , 4 )
_a : Tuple = nn.BatchNormad(4 )
_a : Dict = nn.Linear(4 , 5 )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
return (args[0] + 1,) + args[1:], kwargs
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
return output + 1
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a : Any = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
def _lowercase ( self : str ) -> Union[str, Any]:
_a : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : int = torch.randn(2 , 3 )
_a : str = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_a : List[str] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Tuple = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Tuple ) -> List[str]:
_a : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : List[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : List[str] = torch.randn(2 , 3 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def _lowercase ( self : Dict ) -> str:
_a : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_a : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ )
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_a : Any = torch.randn(2 , 3 )
_a : int = model(UpperCAmelCase__ )
self.assertEqual(output.device , UpperCAmelCase__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 294 | 0 |
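accelerate's hooks wrap a module's forward; plain PyTorch exposes the same idea through register_forward_pre_hook. A minimal sketch reproducing the "+1 on the input" behaviour of PreForwardHook above:

import torch
import torch.nn as nn

model = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = model(x + 1)                      # reference output, computed before hooking

def add_one_pre_hook(module, args):
    # returning a tuple replaces the positional inputs to forward
    return (args[0] + 1,)

handle = model.register_forward_pre_hook(add_one_pre_hook)
assert torch.allclose(model(x), expected, atol=1e-5)
handle.remove()                              # detach the hook, restoring plain forward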
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : Any=False ):
"""simple docstring"""
_a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_a = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_a = """"""
else:
_a = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
_a = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[
: config.hidden_size, :
]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Tuple ):
"""simple docstring"""
_a = dct.pop(UpperCamelCase__ )
_a = val
def A_ ( ):
"""simple docstring"""
_a = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_a = Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : int ):
"""simple docstring"""
_a = DeiTConfig()
# all deit models have fine-tuned heads
_a = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_a = 10_00
_a = """huggingface/label-files"""
_a = """imagenet-1k-id2label.json"""
_a = json.load(open(hf_hub_download(UpperCamelCase__, UpperCamelCase__, repo_type='''dataset''' ), '''r''' ) )
_a = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
_a = int(deit_name[-6:-4] )
_a = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
_a = 1_92
_a = 7_68
_a = 12
_a = 3
elif deit_name[9:].startswith('''small''' ):
_a = 3_84
_a = 15_36
_a = 12
_a = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
_a = 10_24
_a = 40_96
_a = 24
_a = 16
# load original model from timm
_a = timm.create_model(UpperCamelCase__, pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_a = timm_model.state_dict()
_a = create_rename_keys(UpperCamelCase__, UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# load HuggingFace model
_a = DeiTForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
_a = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_a = DeiTImageProcessor(size=UpperCamelCase__, crop_size=config.image_size )
_a = image_processor(images=prepare_img(), return_tensors='''pt''' )
_a = encoding["""pixel_values"""]
_a = model(UpperCamelCase__ )
_a = timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__, outputs.logits, atol=1e-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 320 |
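read_in_q_k_v above slices timm's fused qkv projection into separate query/key/value blocks. The slicing in isolation (hidden_size is illustrative):

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)   # fused [q; k; v]
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)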
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(UpperCamelCase__ ):
print(F"""{i}\t\t{d}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for j in range(UpperCamelCase__ ):
_a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = [float("""inf""" )] * vertex_count
_a : Any = 0.0
for _ in range(vertex_count - 1 ):
for j in range(UpperCamelCase__ ):
_a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
_a : Any = distance[u] + w
_a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input('Enter number of vertices: ').strip())
_snake_case = int(input('Enter number of edges: ').strip())
_snake_case = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
_snake_case , _snake_case , _snake_case = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
_snake_case = {'src': src, 'dst': dest, 'weight': weight}
_snake_case = int(input('\nEnter shortest path source:').strip())
_snake_case = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 294 | 0 |
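A small non-interactive run of the Bellman-Ford routine above, using the original names (bellman_ford, and the {'src', 'dst', 'weight'} edge dicts) that the __main__ block references:

# 0 --4--> 1, 0 --5--> 2, 1 --(-3)--> 2 : the shortest 0 -> 2 path costs 1
edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
distances = bellman_ford(edges, 3, len(edges), 0)
assert distances == [0.0, 4.0, 1.0]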
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 247 |
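For context, a sketch of using the processor these tests exercise directly, with the same size and crop settings as the tester above; the 32x32 input image is arbitrary:

from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.new("RGB", (32, 32))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, crop height, crop width)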
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 0 |
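The file above follows the standard transformers lazy-import layout: _import_structure maps submodule names to exported symbols, and at runtime the module object is replaced by a _LazyModule so the heavy torch or TF backends load only on first attribute access. A sketch of the effect from user code, assuming PyTorch is installed:

import transformers.models.transfo_xl as transfo_xl

# Nothing backend-specific has been imported yet; this attribute access makes
# _LazyModule import modeling_transfo_xl and resolve the symbol.
model_cls = transfo_xl.TransfoXLModel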
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a diffusion transformer."""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 159 |
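A usage sketch matching this pipeline's interface; the checkpoint name is illustrative and the CUDA device is an assumption:

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
class_ids = pipe.get_label_ids(["white shark"])  # map text labels to ImageNet class ids
image = pipe(class_labels=class_ids, num_inference_steps=25, guidance_scale=4.0).images[0]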
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
'''simple docstring'''
_a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("""All input parameters must be positive""" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("""Relative densities cannot be greater than one""" )
else:
_a : Tuple = 1 - (matter_density + radiation_density + dark_energy)
_a : int = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
_a : List[str] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_snake_case = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 294 | 0 |
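A quick sanity check on the formula: at redshift zero every (redshift + 1) factor is 1, and the curvature term absorbs any remainder, so the bracketed sum is exactly 1 and the function returns the Hubble constant itself:

h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3,
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-6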
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 295 |
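Outside a test suite, the same hook API bolts behavior onto any nn.Module; a minimal sketch with a hypothetical hook that doubles the input before the wrapped forward runs:

import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class DoubleInputHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Scale the first positional argument; leave everything else untouched.
        return (args[0] * 2,) + args[1:], kwargs

layer = nn.Linear(3, 3)
add_hook_to_module(layer, DoubleInputHook())
out = layer(torch.randn(2, 3))  # the wrapped forward sees the doubled input
remove_hook_from_module(layer)  # restores the original forward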
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 295 | 1 |
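The knobs exercised above are the public surface for controlling transformers' log output; a short sketch:

from transformers.utils import logging

logging.set_verbosity_info()  # applies to every transformers.* logger
logger = logging.get_logger(__name__)
logger.info("visible now that verbosity is INFO")
logging.disable_progress_bar()  # also silences tqdm download bars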