"""
Project Euler problem 39: https://projecteuler.net/problem=39

If p is the perimeter of a right-angle triangle with integral side lengths,
for which value of p <= 1000 is the number of solutions maximised?
"""
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the integral right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter with the largest number of solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
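# A quick doctest-style sanity check (an illustrative sketch, not part of the
# original script): perimeter 120 is known to admit exactly three integral
# right triangles, (20, 48, 52), (24, 45, 51) and (30, 40, 50).
#
#     >>> pythagorean_triple(120)[120]
#     3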
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Base import structure; optional backends extend it below.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Defer all submodule imports until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in value_array, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # Start with the first dataset row, then keep whichever row is closer.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
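# Example usage, as a sketch: with the two dataset rows below, the nearest
# neighbour of [0, 1] is [0, 0] (both rows are at distance 1.0, and ties keep
# the first row seen).
#
#     >>> dataset = np.array([[0, 0], [1, 1]])
#     >>> similarity_search(dataset, np.array([[0, 1]]))
#     [[[0, 0], 1.0]]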
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend because the SDE noise sampler is device-dependent.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
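# The tests above all exercise the same denoising recipe. A minimal standalone
# sketch of that loop (assuming torchsde is installed and `unet` is any
# callable returning a noise prediction with the sample's shape):
#
#     scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample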
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Dict = logging.get_logger(__name__)
set_seed(770)
UpperCAmelCase : List[Any] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
UpperCAmelCase : Dict = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
UpperCAmelCase : Dict = os.path.dirname(os.path.abspath(__file__))
UpperCAmelCase : List[Any] = os.path.join(os.path.expanduser("~"), ".cache")
UpperCAmelCase : Optional[int] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
'''simple docstring'''
lowercase_ = model_type
if use_small:
key += "_small"
return os.path.join(__lowerCAmelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Any:
'''simple docstring'''
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
hf_hub_download(repo_id=__lowerCAmelCase , filename=__lowerCAmelCase , local_dir=__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase="text" ) -> int:
'''simple docstring'''
if model_type == "text":
lowercase_ = BarkSemanticModel
lowercase_ = BarkSemanticConfig
lowercase_ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase_ = BarkCoarseModel
lowercase_ = BarkCoarseConfig
lowercase_ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase_ = BarkFineModel
lowercase_ = BarkFineConfig
lowercase_ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase_ = F'''{model_type}_small''' if use_small else model_type
lowercase_ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__lowerCAmelCase ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
lowercase_ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
# this is a hack
lowercase_ = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
lowercase_ = model_args["""vocab_size"""]
lowercase_ = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase_ = model_args.pop("""n_head""" )
lowercase_ = model_args.pop("""n_embd""" )
lowercase_ = model_args.pop("""n_layer""" )
lowercase_ = ConfigClass(**checkpoint["""model_args"""] )
lowercase_ = ModelClass(config=__lowerCAmelCase )
lowercase_ = GenerationConfigClass()
lowercase_ = model_generation_config
lowercase_ = checkpoint["""model"""]
# fixup checkpoint
lowercase_ = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(__lowerCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
lowercase_ = k[len(__lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
lowercase_ = new_k.replace(__lowerCAmelCase , new_layer_name_dict[old_layer_name] )
lowercase_ = state_dict.pop(__lowerCAmelCase )
lowercase_ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase_ = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowercase_ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase_ = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(__lowerCAmelCase ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(__lowerCAmelCase ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
lowercase_ = model.num_parameters(exclude_embeddings=__lowerCAmelCase )
lowercase_ = checkpoint["""best_val_loss"""].item()
logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(__lowerCAmelCase , 3 )} loss''' )
model.eval()
model.to(__lowerCAmelCase )
del checkpoint, state_dict
return model
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase="text" ) -> str:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase_ = """cpu""" # do conversion on cpu
lowercase_ = _get_ckpt_path(__lowerCAmelCase , use_small=__lowerCAmelCase )
lowercase_ = _load_model(__lowerCAmelCase , __lowerCAmelCase , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
# load bark initial model
lowercase_ = _bark_load_model(__lowerCAmelCase , """cpu""" , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
if model_type == "text":
lowercase_ = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=__lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
lowercase_ = 5
lowercase_ = 10
if model_type in ["text", "coarse"]:
lowercase_ = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
lowercase_ = bark_model(__lowerCAmelCase )[0]
lowercase_ = model(__lowerCAmelCase )
# take last logits
lowercase_ = output_new_model_total.logits[:, [-1], :]
else:
lowercase_ = 3
lowercase_ = 8
lowercase_ = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase_ = model(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = bark_model(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> int:
'''simple docstring'''
lowercase_ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
lowercase_ = BarkSemanticConfig.from_pretrained(os.path.join(__lowerCAmelCase , """config.json""" ) )
lowercase_ = BarkCoarseConfig.from_pretrained(os.path.join(__lowerCAmelCase , """config.json""" ) )
lowercase_ = BarkFineConfig.from_pretrained(os.path.join(__lowerCAmelCase , """config.json""" ) )
lowercase_ = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowercase_ = BarkSemanticModel.from_pretrained(__lowerCAmelCase )
lowercase_ = BarkCoarseModel.from_pretrained(__lowerCAmelCase )
lowercase_ = BarkFineModel.from_pretrained(__lowerCAmelCase )
lowercase_ = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowercase_ = BarkConfig.from_sub_model_configs(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase_ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase_ = BarkModel(__lowerCAmelCase )
lowercase_ = semantic
lowercase_ = coarseAcoustic
lowercase_ = fineAcoustic
lowercase_ = codec
lowercase_ = bark_generation_config
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
bark.save_pretrained(__lowerCAmelCase , repo_id=__lowerCAmelCase , push_to_hub=__lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
UpperCAmelCase : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
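# Hypothetical invocation (the script file name is an assumption; adjust it to
# wherever this converter lives in the repo):
#
#     python convert_suno_to_hf.py text ./bark-text-out --is_small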
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =10
SCREAMING_SNAKE_CASE_: Dict =datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
SCREAMING_SNAKE_CASE_: Tuple =datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(lowercase ) ),
} , features=lowercase , )
return dataset
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase )
return filename
# FILE_CONTENT + files
_UpperCAmelCase = """\
Text data.
Second line of data."""
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """file.txt"""
SCREAMING_SNAKE_CASE_: str =FILE_CONTENT
with open(lowercase , """w""" ) as f:
f.write(lowercase )
return filename
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
import bza
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =bytes(lowercase , """utf-8""" )
with bza.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: List[str] =str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
SCREAMING_SNAKE_CASE_: Dict =bytes(lowercase , """utf-8""" )
with gzip.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
SCREAMING_SNAKE_CASE_: List[Any] =bytes(lowercase , """utf-8""" )
with lza.frame.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(lowercase , """w""" ) as archive:
archive.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import tarfile
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
import lzma
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
SCREAMING_SNAKE_CASE_: List[Any] =bytes(lowercase , """utf-8""" )
with lzma.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import zipfile
SCREAMING_SNAKE_CASE_: str =tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
SCREAMING_SNAKE_CASE_: Dict =bytes(lowercase , """utf-8""" )
with zstd.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """file.xml"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase , """w""" ) as f:
f.write(lowercase )
return filename
_UpperCAmelCase = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
_UpperCAmelCase = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
_UpperCAmelCase = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
_UpperCAmelCase = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
_UpperCAmelCase = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =datasets.Dataset.from_dict(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
SCREAMING_SNAKE_CASE_: int =con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE_: int =csv.DictWriter(lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase , """w""" , newline="""""" ) as f:
SCREAMING_SNAKE_CASE_: Tuple =csv.DictWriter(lowercase , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import bza
SCREAMING_SNAKE_CASE_: Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase , """rb""" ) as f:
SCREAMING_SNAKE_CASE_: Optional[int] =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
SCREAMING_SNAKE_CASE_: List[Any] =pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase , """wb""" ) as f:
SCREAMING_SNAKE_CASE_: int =pq.ParquetWriter(lowercase , schema=lowercase )
SCREAMING_SNAKE_CASE_: str =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase ) )] for k in DATA[0]} , schema=lowercase )
writer.write_table(lowercase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""data""": DATA}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE_: Tuple ={"""data""": DATA_DICT_OF_LISTS}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: int =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
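# Example of how a test consumes these fixtures (an illustrative sketch, not
# part of this fixture module): pytest injects `csv_path` by matching the
# argument name against the fixture defined above.
#
#     def test_csv_has_four_rows(csv_path):
#         with open(csv_path, newline="") as f:
#             rows = list(csv.DictReader(f))
#         assert len(rows) == 4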
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self, image: np.ndarray, size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None, do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: Optional[bool] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        # Fall back to the instance-level defaults for any argument not given.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
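# Example usage, as a sketch (assuming transformers' BaseImageProcessor
# dispatches __call__ to preprocess; the 384x384 default comes from __init__):
#
#     >>> from PIL import Image
#     >>> processor = BlipImageProcessor()
#     >>> image = Image.new("RGB", (640, 480))
#     >>> processor(image, return_tensors="np")["pixel_values"].shape
#     (1, 3, 384, 384)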
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` with the k-nearest-neighbours algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
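# The classifier re-scans the whole training set on every query; `k` trades
# noise-robustness against locality. An illustrative variation (the output
# depends on the random train/test split above):
#
#     classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4], k=3)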
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : int ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : List[str] = 1
while repunit:
_UpperCAmelCase : Tuple = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCamelCase ( _lowerCAmelCase : int = 1000000 ) -> int:
_UpperCAmelCase : Any = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_lowerCAmelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
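# Worked example for the helper above: with divisor 7 the repunit residues run
# 1, 4, 6, 5, 2, 0, so A(7) = 6 (indeed 111111 = 7 * 15873).
#
#     >>> least_divisible_repunit(7)
#     6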
"""
Project Euler problem 55: https://projecteuler.net/problem=55

Count the Lychrel candidates below `limit`: numbers that do not produce a
palindrome within 50 iterations of the reverse-and-add process.
"""


def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent that you run this script from the root of the repo.
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
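# Typical invocation, per the argparse definition above (the model path is
# only illustrative):
#
#     python check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict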
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions; the parameter names (`q`, `appid`) are passed straight to the API via locals()."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Five-day forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """One-call data for a pair of coordinates."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
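# Example (requires a valid OpenWeatherMap APPID; "name" is a standard field
# of the current-weather response):
#
#     >>> current_weather("Amsterdam")["name"]  # doctest: +SKIP
#     'Amsterdam'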
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __a ( ):
UpperCAmelCase_ : List[Any] = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
UpperCAmelCase_ : Optional[int] = Dataset.from_dict(__lowerCamelCase )
return dataset
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = get_dataset()
UpperCAmelCase_ : Any = make_duplicate_clusters(lowercase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = get_dataset()
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = deduplicate_dataset(lowercase_ )
self.assertEqual(len(lowercase_ ) , 2 )
print(lowercase_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowercase_ )
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A_ ( snake_case_ : Optional[Any] ,snake_case_ : Dict=False ):
'''simple docstring'''
try:
UpperCamelCase : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase : Any = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase : str = strtobool(snake_case_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'If set, {key} must be yes or no.' )
return _value
__A : int = parse_flag_from_env('''RUN_SLOW''', default=False)
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skip("""Test was skipped""" )(snake_case_ )
def A_ ( snake_case_ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests ,"""test is slow""" )(snake_case_ )
def A_ ( snake_case_ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() ,"""test requires only a CPU""" )(snake_case_ )
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() ,"""test requires a GPU""" )(snake_case_ )
def A_ ( snake_case_ : str ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() ,"""test requires a XPU""" )(snake_case_ )
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() ,"""test requires a `mps` backend support in `torch`""" )(snake_case_ )
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() ,"""test requires the Hugging Face suite""" )(snake_case_ )
def A_ ( snake_case_ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() ,"""test requires the bitsandbytes library""" )(snake_case_ )
def A_ ( snake_case_ : Any ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() ,"""test requires TPU""" )(snake_case_ )
def A_ ( snake_case_ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 ,"""test requires a GPU""" )(snake_case_ )
def A_ ( snake_case_ : str ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 ,"""test requires a XPU""" )(snake_case_ )
def A_ ( snake_case_ : List[str] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 ,"""test requires multiple GPUs""" )(snake_case_ )
def A_ ( snake_case_ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 ,"""test requires multiple XPUs""" )(snake_case_ )
def A_ ( snake_case_ : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() ,"""test requires safetensors""" )(snake_case_ )
def A_ ( snake_case_ : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() ,"""test requires DeepSpeed""" )(snake_case_ )
def A_ ( snake_case_ : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version(""">=""" ,"""1.12.0""" ) ,"""test requires torch version >= 1.12.0""" )(snake_case_ )
def A_ ( snake_case_ : Tuple=None ,snake_case_ : List[str]=None ):
'''simple docstring'''
if test_case is None:
return partial(snake_case_ ,version=snake_case_ )
return unittest.skipUnless(is_torch_version(""">=""" ,snake_case_ ) ,f'test requires torch version >= {version}' )(snake_case_ )
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() ,"""test requires Tensorboard""" )(snake_case_ )
def A_ ( snake_case_ : Union[str, Any] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() ,"""test requires wandb""" )(snake_case_ )
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() ,"""test requires comet_ml""" )(snake_case_ )
__A : int = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A_ ( snake_case_ : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available ,"""test requires at least one tracker to be available and for `comet_ml` to not be installed""" ,)(snake_case_ )
class lowerCamelCase ( unittest.TestCase ):
    clear_on_setup = True

    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("""**/*""" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class lowerCamelCase ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowerCamelCase ( unittest.TestCase ):
    def add_mocks( self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def A_ ( tensor : Tuple ):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] ,tensor ):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print("""\nRunning: """ ,""" """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] ,*cmd[1:] ,stdin=stdin ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=env ,)
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee( line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label ,line ,file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout ,lambda line : tee(line ,out ,sys.stdout ,label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr ,lambda line : tee(line ,err ,sys.stderr ,label="""stderr:""" ) ) ),
        ] ,timeout=timeout ,)
    return _RunOutput(await p.wait() ,out ,err )
def A_ ( cmd , env=None , stdin=None , timeout=1_8_0 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            f'The combined stderr from workers follows:\n{stderr}' )
    return result
class SubprocessCallException( Exception ):
    pass
def A_ ( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command ,stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output ,"""decode""" ):
                output = output.decode("""utf-8""" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 27
|
"""simple docstring"""
def max_product_subarray( numbers: list[int] ) -> int:
    '''simple docstring'''
    if not numbers:
        return 0
    if not isinstance(numbers ,(list, tuple) ) or not all(
        isinstance(number ,int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 ,len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number ,max_till_now * number )
        min_till_now = min(number ,min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod ,max_till_now )
    return max_prod
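
# A few hand-checked cases for the routine above; the sign flip on negatives
# is what makes tracking both a running maximum and minimum necessary:
assert max_product_subarray([2, 3, -2, 4]) == 6        # subarray [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0          # subarray [0]
assert max_product_subarray([2, 3, -2, 4, -1]) == 48   # the whole array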
| 27
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 5_12}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 10_24}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 5_12}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 20_48, "num_labels": 31_29}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 10_24,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params )

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )

    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
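
# For reference, a typical invocation of the script above (the checkpoint name
# must be one of ACCEPTABLE_CHECKPOINTS; the script file name and output
# directory here are illustrative):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visual_bert_vqa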
| 13
|
def topological_sort( graph ):
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("Cycle exists" )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
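
# Traced by hand for the sample graph: in-degrees start at [0, 1, 1, 2, 1, 1];
# vertex 0 unlocks 1 and 2, both unlock 3, which unlocks 4 and 5, so the call
# prints:
#   [0, 1, 2, 3, 4, 5]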
| 13
| 1
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data: dict ):
    '''simple docstring'''
    return (data["data"], data["target"])


def xgboost( features: np.ndarray , target: np.ndarray ):
    '''simple docstring'''
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier


def main():
    '''simple docstring'''
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap='''Blues''' , normalize='''true''' , )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
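
# The fitted classifier also exposes the standard scikit-learn API, so a
# numeric check is one extra line inside main() (a sketch; the 0.25 test
# split is random, so the exact score varies between runs):
#   print(xgboost_classifier.score(x_test, y_test))  # held-out accuracy, typically near 1.0 on iris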
| 157
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
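
# The _LazyModule indirection defers the heavy torch import until one of the
# exported names is actually touched. A minimal standalone sketch of the same
# idea using module-level __getattr__ from PEP 562 (module and class names
# here are illustrative, not the transformers implementation):
#
# lazy_pkg/__init__.py -- importing the package stays cheap; the submodule is
# only loaded when one of its names is first accessed
import importlib

_sketch_import_structure = {"heavy_submodule": ["HeavyClass"]}


def __getattr__(name):
    for module_name, exported_names in _sketch_import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")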
| 157
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root: Node | None ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def postorder( root: Node | None ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def inorder( root: Node | None ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def height( root: Node | None ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order( root: Node | None ) -> Sequence[Node | None]:
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right( root: Node | None , level: int ) -> Sequence[Node | None]:
    output = []

    def populate_output( root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output


def get_nodes_from_right_to_left( root: Node | None , level: int ) -> Sequence[Node | None]:
    output = []

    def populate_output( root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output
def zigzag( root: Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f'In-order Traversal: {inorder(tree )}' )
    print(f'Pre-order Traversal: {preorder(tree )}' )
    print(f'Post-order Traversal: {postorder(tree )}' , '\n' )
    print(f'Height of Tree: {height(tree )}' , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(tree ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(tree ) + 1 ):
        print(f'Level {level}:' , get_nodes_from_left_to_right(tree , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
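
# For the five-node sample tree (1 at the root, 2 and 3 as its children,
# 4 and 5 under 2), the traversals evaluate, by hand, to:
#   inorder:     [4, 2, 5, 1, 3]
#   preorder:    [1, 2, 4, 5, 3]
#   postorder:   [4, 5, 2, 3, 1]
#   level_order: [1, 2, 3, 4, 5]
#   zigzag:      [[1], [3, 2], [4, 5]]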
| 316
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
        '''simple docstring'''
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ):
        '''simple docstring'''
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
'''simple docstring'''
self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
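
# The generator branch in get_dummy_inputs is the standard device-aware
# seeding idiom in diffusers tests: mps does not support device-bound
# generators, so the global one is seeded instead. Extracted as a reusable
# helper (the name is illustrative):
import torch


def seeded_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds and returns the default generator
    return torch.Generator(device=device).manual_seed(seed)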
| 316
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig( PretrainedConfig ):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(
        self ,
        vocab_size=3_0145 ,
        emb_dim=2048 ,
        n_layers=12 ,
        n_heads=16 ,
        dropout=0.1 ,
        attention_dropout=0.1 ,
        gelu_activation=True ,
        sinusoidal_embeddings=False ,
        causal=False ,
        asm=False ,
        n_langs=1 ,
        use_lang_emb=True ,
        max_position_embeddings=512 ,
        embed_init_std=2048**-0.5 ,
        layer_norm_eps=1E-12 ,
        init_std=0.02 ,
        bos_index=0 ,
        eos_index=1 ,
        pad_index=2 ,
        unk_index=3 ,
        mask_index=5 ,
        is_encoder=True ,
        summary_type="first" ,
        summary_use_proj=True ,
        summary_activation=None ,
        summary_proj_to_labels=True ,
        summary_first_dropout=0.1 ,
        start_n_top=5 ,
        end_n_top=5 ,
        mask_token_id=0 ,
        lang_id=0 ,
        pad_token_id=2 ,
        bos_token_id=0 ,
        **kwargs ,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs['''n_words''']

        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
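
# The attribute_map lets the config answer to the canonical transformers
# attribute names as well as XLM's own field names; a quick sketch of what
# that buys:
config = XLMConfig(emb_dim=1024, n_layers=6)
assert config.hidden_size == 1024      # aliased to emb_dim via attribute_map
assert config.num_hidden_layers == 6   # aliased to n_layers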
| 352
|
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge( self , u , v , w ):
        self.dp[u][v] = w

    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
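
    # Note that show_min returns the distance rather than printing it, so the
    # two calls above discard their results; wrapping them in print makes the
    # run informative. Hand-checked values for this graph:
    print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4 (5 + 6)
    print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3 (9 + 7)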
| 211
| 0
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 64
|
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
UpperCAmelCase : Tuple = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
UpperCAmelCase : Optional[int] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
UpperCAmelCase : List[str] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map=None , reduce_labels=False , ):
    '''simple docstring'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 2_55
        label = label - 1
        label[label == 2_54] = 2_55
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map=None , reduce_labels=False , ):
    '''simple docstring'''
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num=None , label_map=None , reduce_labels=False , ):
    '''simple docstring'''
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))),
}) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute( self , predictions , references , num_labels: int , ignore_index: bool , nan_to_num: Optional[int] = None , label_map: Optional[Dict[int, int]] = None , reduce_labels: bool = False , ):
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
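
# A minimal numeric sanity check of the core arithmetic, independent of the
# datasets wrapper, with the per-class areas worked out by hand for a single
# 2x2 map and two classes:
#
#   import numpy as np
#   pred = np.array([[0, 1], [1, 1]])
#   gt = np.array([[0, 0], [1, 1]])
#   inter, union, _, _ = intersect_and_union(pred, gt, num_labels=2, ignore_index=255)
#   # class 0: intersection 1, union 2 -> IoU 0.5; class 1: intersection 2, union 3 -> IoU ~0.667
#   print(inter / union)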
| 136
| 0
|
'''simple docstring'''
import operator as op
__snake_case ="scaler.pt"
__snake_case ="pytorch_model"
__snake_case ="random_states"
__snake_case ="optimizer"
__snake_case ="scheduler"
__snake_case ="pytorch_model.bin"
__snake_case ="pytorch_model.bin.index.json"
__snake_case ="model.safetensors"
__snake_case ="model.safetensors.index.json"
__snake_case ="1.10.2"
__snake_case ="py38"
__snake_case ="4.17.0"
__snake_case =["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
__snake_case =["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__snake_case =["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__snake_case =["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__snake_case =["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__snake_case ="2.0.1"
__snake_case =["pdsh", "standard", "openmpi", "mvapich"]
__snake_case =["default", "reduce-overhead", "max-autotune"]
__snake_case ={">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
__snake_case =["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__snake_case =["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 369
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    def _get_uniform_logits( self , batch_size: int , length: int ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 2_0
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 1_0].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 1_0
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 1_0
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , expected_filtered_dist , atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        vocab_size = 2_0
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 1_5
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 2_0
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=2_0 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 2_0
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=2_0 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 1_0
        vocab_size = 1_5
        eos_token_id = 2
        bos_token_id = 1
        max_length = 1_5
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 1_0
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        batch_size = 4
        sequence_length = 1_0
        vocab_size = 1_5
        eos_token_id = 2
        bos_token_id = 1
        max_length = 1_5
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 1_0

        # no processor list
        def run_no_processor_list( input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores

        # with processor list
        def run_processor_list( input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
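
# All of the warpers under test transform a row of logits before sampling;
# temperature is the simplest: divide the logits by T before the softmax, so
# T < 1 sharpens the distribution and T > 1 flattens it. A numpy-only sketch
# of that effect:
import numpy as np


def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()


logits = np.array([2.0, 1.0, 0.1])
print(softmax(logits))        # baseline distribution
print(softmax(logits / 0.5))  # T = 0.5: the peak gets sharper
print(softmax(logits / 1.3))  # T = 1.3: the distribution gets flatter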
| 55
| 0
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone( PreTrainedModel , BackboneMixin ):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__( self , config , **kwargs ):
        """simple docstring"""
        requires_backends(self , '''timm''' )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' )
        if config.backbone not in timm.list_models():
            raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
        if hasattr(config , '''out_features''' ) and config.out_features is not None:
            raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' )
        pretrained = getattr(config , '''use_pretrained_backbone''' , None )
        if pretrained is None:
            raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , '''out_indices''' , None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['''module''']: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *model_args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''vision''', '''timm'''] )
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('''config''' , TimmBackboneConfig() )
        use_timm = kwargs.pop('''use_timm_backbone''' , True )
        if not use_timm:
            raise ValueError('''use_timm_backbone must be True for timm backbones''' )
        num_channels = kwargs.pop('''num_channels''' , config.num_channels )
        features_only = kwargs.pop('''features_only''' , config.features_only )
        use_pretrained_backbone = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone )
        out_indices = kwargs.pop('''out_indices''' , config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs )
    def _init_weights( self , module ):
        """simple docstring"""
        pass
    def forward( self , pixel_values , output_attentions=None , output_hidden_states=None , return_dict=None , **kwargs ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError('''Cannot output attentions for timm backbones at the moment''' )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values , **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None )
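
# A sketch of driving the backbone end to end (the timm model name and input
# shape are illustrative; with use_pretrained_backbone=False no weights are
# downloaded):
#
#   import torch
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   model = TimmBackbone(config)
#   outputs = model(torch.randn(1, 3, 224, 224))
#   print([fm.shape for fm in outputs.feature_maps])  # one feature map per requested out_index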
| 75
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias( format_type: Optional[str] ):
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter( format_type: Optional[str] , **format_kwargs ):
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
| 214
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_whisper"""] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_whisper"""] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
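# Note: at import time this module only declares `_import_structure`; the _LazyModule set up
# above defers the heavy torch/tf/flax imports until one of the listed symbols is accessed.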
| 353
|
"""simple docstring"""
from typing import Any
def mode(input_list):
    """Return the mode(s) of `input_list`, i.e. the value(s) occurring most often, in sorted order."""
    if not input_list:
        return []
    counts = [input_list.count(value) for value in input_list]
    max_count = max(counts)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, count in enumerate(counts) if count == max_count})
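
# Examples (illustrative): mode([2, 2, 3]) -> [2]; mode([1, 1, 2, 2]) -> [1, 2]; mode([]) -> [].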
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12
| 0
|
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''artists_file''': '''artists.json''',
'''lyrics_file''': '''lyrics.json''',
'''genres_file''': '''genres.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''artists_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''',
},
'''genres_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''',
},
'''lyrics_file''': {
'''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'''jukebox''': 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs, )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding='utf-8') as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding='utf-8') as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding='utf-8') as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(R'\-\'', R'\-+\'')
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + '.v2'
                genres[idx] = [
                    self._normalize(genre) + '.v2' for genre in genres[idx].split('_')
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+')
            vocab = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab['<unk>'] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''
        else:
            self.out_of_vocab = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+')
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace('\\', '\n')
        lyrics = self.out_of_vocab.sub('', lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text: str):
        accepted = (
            [chr(i) for i in range(ord('a'), ord('z') + 1)]
            + [chr(i) for i in range(ord('A'), ord('Z') + 1)]
            + [chr(i) for i in range(ord('0'), ord('9') + 1)]
            + ['.']
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R'_+')
        text = ''.join([c if c in accepted else '_' for c in text.lower()])
        text = pattern.sub('_', text).strip('_')
        return text
    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
        with open(artists_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
        with open(genres_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
        with open(lyrics_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
return artist, genres, lyrics
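
# A minimal usage sketch (illustrative; the vocab file paths, artist, genre and lyrics below
# are hypothetical):
#
#   tokenizer = JukeboxTokenizer("artists.json", "genres.json", "lyrics.json")
#   encoding = tokenizer("Alan Jackson", "Country_Rock", lyrics="remember when...")
#   encoding["input_ids"]  # one tensor per prior in `version` (here: "v3", "v2", "v2")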
| 138
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, 'rb') as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
raise
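
# Note: this is the packaged builder behind `load_dataset("parquet", ...)`. Record batches
# are read `batch_size` rows at a time via ParquetFile.iter_batches, so a large parquet file
# is streamed rather than materialized in memory at once.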
| 138
| 1
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 8_4_7
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 1_5_0
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 1_7_1
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 1_3_3
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 1_9
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 6_5
        filename = 'mapillary-vistas-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
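# The original checkpoint stores the Swin attention's Q, K and V as one fused (3*dim, dim)
# projection; the slices above ([:dim], [dim:2*dim], [-dim:]) split it back into the separate
# query/key/value weights the HF model expects (key names mirror those in create_rename_keys).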
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 6_5
    elif "cityscapes" in model_name:
        ignore_index = 6_5_5_3_5
    else:
        ignore_index = 2_5_5
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors='pt')
    outputs = model(**inputs)
    print('Logits:', outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and image processor to {pytorch_dump_folder_path}''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(F'''nielsr/{model_name}''')
        image_processor.push_to_hub(F'''nielsr/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
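# Example invocation (illustrative; the script name and checkpoint path are machine-specific):
#   python convert_maskformer_checkpoint.py --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade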
| 369
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 332
| 0
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a : Tuple = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
    def test_push_to_hub(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTOR_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTOR_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token)
            new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTOR_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True)
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 311
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
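    # Note: pad_across_processes pads dim 0 up to the largest size across ranks (rank i
    # contributes i + 2 rows, so every rank ends with num_processes + 1 rows); with
    # pad_first=True the zero padding is placed before the data instead of after it.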
| 311
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py') , os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py') , )
    def tearDown(self):
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=1_1_9)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , REFERENCE_CODE) , )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub('Bert' , long_class_name , REFERENCE_CODE) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , REFERENCE_CODE , overwrite_result=re.sub('Bert' , 'TestModel' , REFERENCE_CODE) , )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list , converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample)
| 352
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (1-indexed), in expected linear time."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
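
# Examples (illustrative): kth_number([3, 1, 2], 1) == 1 and kth_number([3, 1, 2], 3) == 3.
# Note: the strict partition (e < pivot / e > pivot) drops duplicates of the pivot, so the
# input is assumed to contain distinct values.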
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / 'vae_decoder' / 'model.onnx', ordered_input_names=['latent_sample', 'return_dict'], output_names=['sample'], dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
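
# Example invocation (illustrative; the script name is hypothetical and the model path may be
# a local directory or a Hub id):
#   python convert_vae_to_onnx.py --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_vae_onnx --opset 14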
| 27
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
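# The `_LazyModule` registered above means importing this package is cheap: none of
# the torch/TF/flax submodules load until one of their attributes is touched. A
# quick illustration (a sketch, not part of the original file):
#
#   import transformers.models.blenderbot as blenderbot  # no frameworks imported yet
#   config = blenderbot.BlenderbotConfig()  # first access triggers the real import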
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure FeaturesManager.determine_framework resolves the expected framework.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_framework_from_env(self):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
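# For context, the method under test resolves a framework string ("pt" or "tf") from,
# in order: an explicit argument, the files present in a local checkpoint, and the
# installed backends. A quick interactive sketch (the model name is illustrative):
#
#   from transformers.onnx import FeaturesManager
#   FeaturesManager.determine_framework("distilbert-base-uncased")  # "pt" when torch is installed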
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
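# A toy sanity check for the yes/no branch (a hypothetical record shaped like a
# Natural Questions row, not real data):
#
#   _toy = {"id": "0", "annotations": {"yes_no_answer": [1], "short_answers": [], "long_answer": []}}
#   _get_single_answer(_toy)
#   # -> {"id": "0", "category": ["yes"], "start_token": [], "end_token": [],
#   #     "start_byte": [], "end_byte": [], "text": ["<cls>"], "remove_it": True}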
def get_context_and_ans(example, assertion=False):
    """Gives the new context after removing <html> tokens, with the answer span re-indexed to match."""
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
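# Offset bookkeeping in miniature: dropping HTML tokens shifts span indices left by
# the number of HTML tokens that precede them. For example, with
# tokens ["<p>", "Hello", "world", "</p>"] and is_html [True, False, False, True],
# a raw span starting at token 1 starts at token 0 of the cleaned context, because
# exactly one HTML token sits before it.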
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
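# Window arithmetic sketch (illustrative numbers): windows over the document start
# every max_length - doc_stride tokens, so with max_length=4096, doc_stride=2048
# and a 10-token question prefix:
#
#   list(range(10, 5000, 4096 - 2048))  # -> [10, 2058, 4106]
#
# i.e. consecutive windows overlap by roughly doc_stride - q_len document tokens.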
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer ("null") samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'},
    )
    is_time: bool = field(
        default=False,
        metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={'help': 'Disable logarithmic scale when plotting'},
    )
    is_train: bool = field(
        default=False,
        metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline='') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size']))
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length']))
                if can_convert_to_int(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = int(row['result'])
                elif can_convert_to_float(row['result']):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = float(row['result'])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log')
            ax.set_yscale('log')

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz']))
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len']))
            results = self.result_dict[model_name]['result']

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}'
                )
                plt.plot(x_axis_array, y_axis_array, '--')

            title_str += f' {label_model_name} vs.'

        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
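# A hypothetical input for this script, matching the columns the reader above
# expects (model, batch_size, sequence_length, result) -- e.g. as written by the
# transformers benchmark utilities with `save_to_csv=True`:
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1349
#   bert-base-uncased,8,512,1589
#
# and a matching invocation:
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png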
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}

COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key, encoded):
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), encoded):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext):
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles, common_word):
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }")
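# Search-space check: keys are three lowercase letters, so the exhaustive sweep in
# filter_valid_chars() tries only 26 ** 3 == 17576 candidates -- easily brute-forced.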
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )

    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )

    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )

    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )

    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )

    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )

    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )

    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )

    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )

    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )

    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )

    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )

    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )

    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )

    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )

    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
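# The shard-name convention count_samples() relies on: the trailing number before
# ".tfrecord" is the shard's sample count (the filename below is illustrative):
#
#   re.search(r"-\d+-(\d+)\.tfrecord", "train-00000-12345.tfrecord").group(1)  # -> "12345"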
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
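# Shape sanity check (a sketch): a 1-D array of timesteps maps to a
# (batch, embedding_dim) matrix whose halves hold the sin and cos features.
#
#   get_sinusoidal_embeddings(jnp.array([0.0, 10.0]), embedding_dim=8).shape  # (2, 8)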
class FlaxTimestepEmbedding(nn.Module):
    """Learns an MLP embedding for the input time steps."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wrapper module for the sinusoidal time step embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
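# Why the multiplication above: `loss` is the *mean* cross-entropy per label token,
# so -(labels.shape[-1] * loss) recovers the summed sequence log-likelihood that the
# reference Mesh-TensorFlow ("mtf") T5 implementation reports, making the two
# numbers directly comparable.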
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
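# A tiny worked example (hypothetical 3-vertex graph, not part of the original):
# edges 0->1 (w=4), 0->2 (w=1), 2->1 (w=2) give distances [0.0, 3.0, 1.0] from 0.
#
#   _demo_graph = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 2},
#   ]
#   bellman_ford(_demo_graph, vertex_count=3, edge_count=3, src=0)  # [0.0, 3.0, 1.0]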
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 273
| 1
|
'''
Feature extractor class for MarkupLM.
'''

import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor, which extracts text nodes and their
    corresponding xpaths from HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
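

# Illustrative usage (added example; the output is traced by hand from the
# methods above, not taken from the original file):
#
# >>> feature_extractor = MarkupLMFeatureExtractor()
# >>> encoding = feature_extractor("<html><body><p>Hello</p><p>world</p></body></html>")
# >>> encoding["nodes"]
# [['Hello', 'world']]
# >>> encoding["xpaths"]
# [['/html/body/p[1]', '/html/body/p[2]']]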
| 366
|
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between v0 and v1 (numpy arrays or torch tensors)."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear, fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 242
| 0
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-audio-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
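

# Illustrative usage (added example): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio by
# 5 * 2**6 = 320 samples per output frame, which is exactly what the
# `inputs_to_logits_ratio` property reports.
#
# >>> config = Data2VecAudioConfig()
# >>> config.inputs_to_logits_ratio
# 320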
| 13
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12
| 0
|
"""simple docstring"""
from jiwer import compute_measures
import datasets
UpperCAmelCase : Tuple = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCAmelCase : str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
UpperCAmelCase : Tuple = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
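

# Illustrative usage (added example; numbers checked by hand): one
# substitution against a four-word reference gives a WER of 1 / 4, and with a
# single prediction/reference pair both code paths above agree.
#
# >>> wer = datasets.load_metric("wer")
# >>> wer.compute(predictions=["this is the prediction"], references=["this is the reference"])
# 0.25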
| 360
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
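

# Added sanity check (illustrative, not from the original script): PATTERNS is
# applied left to right, so a raw TF variable name maps onto a Bart-style key.
assert rename_state_dict_key("memory_attention/output_proj/kernel") == "encoder_attn.out_proj.weight"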
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''')

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], F'''no matches found for the following tf keys {extra}'''
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'''summarization_{dataset}''']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase : List[Any] = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase : int = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 313
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model, using a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
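
# Illustrative invocation (assumed, not from the original script; the config
# name, output path and override are placeholders). fire exposes the function
# parameters as CLI arguments, so something like:
#
#   python <this_script>.py t5-small /tmp/t5-small-random --d_model=64
#
# would write an untrained checkpoint plus tokenizer files to the given path.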
| 75
|
'''
Image processor class for CLIP.
'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
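

# Illustrative usage (added example; the 224-pixel sizes are the class
# defaults set above, everything else is an assumption): the short edge is
# resized to 224, the image center-cropped, rescaled and normalized.
#
# >>> image_processor = CLIPImageProcessor()
# >>> image = PIL.Image.new("RGB", (640, 480))
# >>> image_processor(images=image, return_tensors="np")["pixel_values"].shape
# (1, 3, 224, 224)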
| 75
| 1
|
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase : Any = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """Clean one modality section of the model toc: deduplicate entries and sort them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
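

# Added sanity check (illustrative, not from the original script): duplicate
# `local` entries that share a title collapse to one entry, and the cleaned
# toc comes back sorted by title.
assert clean_model_doc_toc(
    [
        {"local": "bert", "title": "BERT"},
        {"local": "albert", "title": "ALBERT"},
        {"local": "bert", "title": "BERT"},
    ]
) == [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]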
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase : List[str] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 127
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
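

# Added sanity check (illustrative, not from the original solution): "SKY"
# scores 19 + 11 + 25 = 55, the 10th triangular number, so it would count.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS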
if __name__ == "__main__":
print(solution())
| 127
| 1
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and angle into its x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether a system of forces is in static equilibrium (net moment close to zero)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCamelCase__: List[Any] = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
UpperCamelCase__: NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCamelCase__: Optional[Any] = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
UpperCamelCase__: int = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCamelCase__: List[Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
UpperCamelCase__: List[str] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 23
|
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k fillings for the single <mask> token in masked_input."""
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 194
| 0
|
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
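

# Added sanity checks (illustrative, not from the original module):
# 3**4 == 81, and a zero exponent hits the `else 1` base case.
assert power(3, 4) == 81
assert power(5, 0) == 1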
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
_lowerCAmelCase : Tuple = int(input('''Enter the base: ''').strip())
_lowerCAmelCase : Any = int(input('''Enter the exponent: ''').strip())
_lowerCAmelCase : List[Any] = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_lowerCAmelCase : Union[str, Any] = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
| 354
|
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
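
# Note: this is the maximum *subsequence* sum (elements need not be adjacent),
# so it effectively adds up the positive terms, e.g. [1, -2, 3] -> 1 + 3 = 4;
# the contiguous-subarray (Kadane) variant would return 3 for the same input.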
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 70
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
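
# Example usage (hypothetical pass-through filter; any object with a matching
# `process` method satisfies the FilterType protocol):
#
#     class Identity:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(Identity(), 48000)
#     show_phase_response(Identity(), 48000)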
| 36
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 100
| 0
|
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'

    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 350
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
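
# Note: the classic Fisher-Yates (Knuth) shuffle walks the list once, swapping
# position i with a uniformly chosen index j <= i, which guarantees every
# permutation is equally likely. A minimal sketch of that variant:
#
#     for i in range(len(data) - 1, 0, -1):
#         j = random.randint(0, i)
#         data[i], data[j] = data[j], data[i]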
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 210
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
|
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
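
# mT5 reuses the T5 architecture unchanged, so each subclass above only swaps
# in the mT5 config class and model type; the layers and forward pass all come
# from the parent T5 classes.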
| 272
| 1
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
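
# Worked example (7B: n = dim = 4096, defaults): int(8 * 4096 / 3) = 10922,
# and rounding up to a multiple of 256 gives 43 * 256 = 11008, which matches
# INTERMEDIATE_SIZE_MAP["7B"].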
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
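    # The original checkpoint stores the two rotary halves interleaved inside
    # each attention head, while the Transformers implementation expects them
    # split into two contiguous blocks; the view/transpose/reshape above
    # reorders the q/k projection rows accordingly.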
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim))
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0)
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1)
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0)

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
    parser.add_argument(
        "--output_dir", help="Location to write HF model and tokenizer", )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 116
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # pick the greatest remaining element of sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so .index() skips it next time

        # check if the weight encountered is less than the total weight
        # remaining.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
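
# Worked example: profit = [60, 100, 120], weight = [10, 20, 30],
# max_weight = 50 -> take the 6/kg and 5/kg items whole (gain 160, weight 30),
# then 20 of the last item's 30 kg: 160 + 20/30 * 120 = 240.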
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 116
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__( self, vocab_size=30_522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3_072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 85
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__( self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(), )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
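
# The SE (Squeeze-and-Excitation) block above computes per-channel attention
# weights from globally pooled features and rescales the input channels,
# letting the network emphasize informative channels at negligible extra cost.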
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__( self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 317
| 0
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the value d < digit for which 1/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
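
# Example: 1/7 = 0.(142857) has a 6-digit recurring cycle, the longest for
# d < 10, so solution(1, 10) returns 7; for d < 1000 the answer is 983.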
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive n-digit integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
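
# Example: 16807 = 7**5 is a 5-digit fifth power and 134217728 = 8**9 is a
# 9-digit ninth power; counting all such cases gives solution() == 49.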
if __name__ == "__main__":
print(f"{solution(10, 22) = }")
| 151
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
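# Quick sanity-check sketch (added; not part of the original file, and it
# assumes the reconstructed signature above):
#
#   config = DPTConfig(is_hybrid=True)
#   assert isinstance(config.backbone_config, BitConfig)
#   assert config.to_dict()["model_type"] == "dpt"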
| 135
|
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
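# Standalone illustration (added) of the behavior the tests above pin down:
# the first record determines the columns, and values missing from later
# records come back as None.
#
#   from datasets import Dataset
#   Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])[1]  # -> {'col_1': None}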
| 242
| 0
|
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
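# Example invocation (added; the script and checkpoint names are illustrative
# assumptions, not taken from the original file). With `fire`, every function
# argument becomes a CLI flag:
#
#   python make_student.py sshleifer/bart-tiny-random --save_path student_dir --e 1 --d 1
#
# which is equivalent to calling:
#
#   create_student_by_copying_alternating_layers("sshleifer/bart-tiny-random", "student_dir", e=1, d=1)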
| 367
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
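# Hedged usage sketch (added): formatters like the one above are normally
# reached through `Dataset.with_format`/`set_format` rather than instantiated
# directly; the "jax" format name is how `datasets` exposes it.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # -> a jax.Array placed on the formatter's default device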
| 219
| 0
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 88
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
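# Minimal usage sketch (added; assumes the reconstructed signature above):
#
#   config = SwitchTransformersConfig(num_layers=6, num_sparse_encoder_layers=3)
#   config.encoder_sparse_step  # -> 2: every second encoder layer is a sparse MoE layer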
| 214
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
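# Illustration (added): the `_LazyModule` indirection above defers heavy imports
# until first attribute access, e.g.:
#
#   from transformers.models.glpn import GLPNConfig   # cheap, config only
#   from transformers.models.glpn import GLPNModel    # triggers the torch-backed module import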
| 365
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]="<pad>" , _snake_case : int="</s>" , _snake_case : Any="<unk>" , _snake_case : Union[str, Any]="<mask_2>" , _snake_case : Any="<mask_1>" , _snake_case : Optional[int]=None , _snake_case : List[str]=103 , _snake_case : Optional[Dict[str, Any]] = None , **_snake_case : Optional[int] , )->None:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = offset
if additional_special_tokens is not None:
if not isinstance(_snake_case , _snake_case ):
raise TypeError(
F'''additional_special_tokens should be of type {type(_snake_case )}, but is'''
F''' {type(_snake_case )}''' )
__lowerCAmelCase : List[str] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(_snake_case ) , self.offset - 1 )
]
if len(set(_snake_case ) ) != len(_snake_case ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
__lowerCAmelCase : Dict = additional_special_tokens_extended
else:
__lowerCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )]
__lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_snake_case , unk_token=_snake_case , mask_token=_snake_case , pad_token=_snake_case , mask_token_sent=_snake_case , offset=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
__lowerCAmelCase : Optional[Any] = mask_token_sent
__lowerCAmelCase : Any = vocab_file
__lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# add special tokens to encoder dict
__lowerCAmelCase : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def UpperCAmelCase__ ( self : str )->int:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def UpperCAmelCase__ ( self : Dict )->Dict[str, int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.__dict__.copy()
__lowerCAmelCase : Union[str, Any] = None
return state
def __setstate__( self : Any , _snake_case : str )->Any:
'''simple docstring'''
__lowerCAmelCase : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCAmelCase : Any = {}
__lowerCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Dict , _snake_case : str )->List[str]:
'''simple docstring'''
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def UpperCAmelCase__ ( self : Tuple , _snake_case : str )->int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__lowerCAmelCase : Any = self.sp_model.piece_to_id(_snake_case )
return sp_id + self.offset
def UpperCAmelCase__ ( self : List[Any] , _snake_case : int )->str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__lowerCAmelCase : Optional[int] = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : Optional[int] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = []
__lowerCAmelCase : Dict = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case ) + token
__lowerCAmelCase : int = []
else:
current_sub_tokens.append(_snake_case )
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : Dict=False )->int:
'''simple docstring'''
return 1
def UpperCAmelCase__ ( self : Tuple , _snake_case : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCAmelCase__ ( self : List[str] , _snake_case : List , _snake_case : Optional[List] = None , _snake_case : bool = False )->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(_snake_case )
elif token_ids_a is None:
return self._special_token_mask(_snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCAmelCase__ ( self : Any , _snake_case : Union[str, Any] , _snake_case : Tuple=None )->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Any , _snake_case : str , _snake_case : Optional[str] = None )->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCAmelCase : Optional[int] = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , """wb""" ) as fi:
__lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
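# Hedged usage sketch (added): a tokenizer like the one above is normally loaded
# from a pretrained checkpoint rather than constructed by hand, e.g.:
#
#   from transformers import PegasusTokenizer
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   tok("Summarize this.")["input_ids"]  # ends with the EOS id (1), per the encoder dict above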
| 232
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self, __magic_name__, __magic_name__="<s>", __magic_name__="</s>", __magic_name__="</s>", __magic_name__="<s>", __magic_name__="<unk>", __magic_name__="<pad>", __magic_name__="<mask>", __magic_name__ = None, **__magic_name__, ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ : Optional[int] = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else mask_token
UpperCamelCase__ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_, eos_token=lowercase_, unk_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, sp_model_kwargs=self.sp_model_kwargs, **lowercase_, )
UpperCamelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
UpperCamelCase__ : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase__ : Dict = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : Union[str, Any] = len(self.sp_model ) + self.fairseq_offset
UpperCamelCase__ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : int = self.__dict__.copy()
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, __magic_name__ ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : List[str] = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
UpperCamelCase__ : List[str] = {}
UpperCamelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ : int = [self.cls_token_id]
UpperCamelCase__ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None, __magic_name__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_, token_ids_a=lowercase_, already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase__ : int = [self.sep_token_id]
UpperCamelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self, __magic_name__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase_, out_type=lowercase_ )
def UpperCamelCase__ ( self, __magic_name__ ) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ : int = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self, __magic_name__ ) -> Dict:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = ''''''.join(lowercase_ ).replace(lowercase_, ''' ''' ).strip()
return out_string
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ : Optional[Any] = os.path.join(
lowercase_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_, '''wb''' ) as fi:
UpperCamelCase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
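# Usage sketch (added), derived from `build_inputs_with_special_tokens` above:
# a single sequence becomes `<s> A </s>` and a pair `<s> A </s></s> B </s>`.
#
#   from transformers import XLMRobertaTokenizer
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok.build_inputs_with_special_tokens([10, 11])    # [0, 10, 11, 2]
#   tok.build_inputs_with_special_tokens([10], [11])  # [0, 10, 2, 2, 11, 2]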
| 201
|
"""simple docstring"""
class MaxFenwickTree:
    """A Fenwick tree (binary indexed tree) supporting point updates and
    range-maximum queries over `[left, right)`."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # this node covers only `index` itself
                self.tree[index] = value
            else:
                # recompute this node's maximum over the range it covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
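# Worked example (added; assumes the reconstruction above):
#
#   ft = MaxFenwickTree(5)
#   ft.update(2, 9)
#   ft.update(4, 7)
#   ft.query(0, 3)  # -> 9  (max over indices [0, 3))
#   ft.query(3, 5)  # -> 7  (max over indices [3, 5))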
| 78
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']
    >>> list_of_references = [[ref1a], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
    >>> print(round(results["google_bleu"], 2))
    0.44

Example 2:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
    ...          'heed', 'the', 'cat', 'commands']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'cat']
    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']
    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
    >>> print(round(results["google_bleu"], 2))
    0.61

Example 3:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
    ...          'heed', 'the', 'cat', 'commands']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'cat']
    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']
    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
    >>> print(round(results["google_bleu"], 2))
    0.53

Example 4:
    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
    ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
    >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
    >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
    ...          'heed', 'the', 'cat', 'commands']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'cat']
    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']
    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> google_bleu = datasets.load_metric("google_bleu")
    >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2, max_len=6)
    >>> print(round(results["google_bleu"], 2))
    0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
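

# Illustrative check (added for reference, not part of the original metric file):
# the metric above is a thin wrapper around NLTK's GLEU implementation, so the
# same number can be reproduced by calling nltk directly. Assumes nltk is installed.
if __name__ == "__main__":
    from nltk.translate import gleu_score as _gleu

    hyp = ["he", "read", "the", "book", "because", "he", "was", "interested", "in", "world", "history"]
    ref = ["he", "was", "interested", "in", "world", "history", "because", "he", "read", "the", "book"]
    # corpus_gleu takes one list of reference token lists per hypothesis
    print(_gleu.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4))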
| 365
|
def is_even(number: int) -> bool:
    # A number is even iff its lowest bit is 0.
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260
| 0
|
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("Exactly one value must be 0 (the unknown quantity to solve for)")
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
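
# Usage sketch (added for illustration): solving for each quantity in the relation
# stress = tangential_force / area. Exactly one argument is passed as 0 to mark it
# as the unknown.
# >>> shear_stress(stress=25, tangential_force=100, area=0)
# ('area', 4.0)
# >>> shear_stress(stress=0, tangential_force=1600, area=200)
# ('stress', 8.0)
# >>> shear_stress(stress=1000, tangential_force=0, area=1200)
# ('tangential_force', 1200000.0)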
| 292
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
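

# Usage sketch (added for illustration, not part of the original file):
# instantiating the default configuration and overriding a couple of fields,
# mirroring how other transformers configs are used.
if __name__ == "__main__":
    config = VivitConfig(num_frames=64, hidden_dropout_prob=0.1)
    print(config.model_type, config.num_frames, config.hidden_size)  # vivit 64 768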
| 292
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
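

# Usage sketch (added for illustration, not part of the original file): building a
# config from an existing backbone config via the classmethod above, then
# round-tripping through to_dict().
if __name__ == "__main__":
    swin = CONFIG_MAPPING["swin"]()
    config = Mask2FormerConfig.from_backbone_config(swin, num_queries=50)
    assert config.to_dict()["num_queries"] == 50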
| 353
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 65
| 0
|
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        # histogram of the gray levels; x holds the per-level pixel counts
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 74
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowerCamelCase_ = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
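
# Example invocation (illustrative; the script name and checkpoint path below are
# placeholders, not taken from the original file):
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook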
| 191
| 0
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('Probability of 2 successes out of 4 trials')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
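    # Cross-check (added for illustration, assumes scipy is installed): the closed
    # form above should agree with scipy's binomial PMF, C(4,2) * 0.75^2 * 0.25^2.
    try:
        from scipy.stats import binom

        print('scipy cross-check:', binom.pmf(2, 4, 0.75))
    except ImportError:
        pass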
| 189
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
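

# Illustrative check (added, not in the original file) of the rounding behavior:
# with the default scale_factor=8, pixel sizes are mapped to latent-grid-aligned sizes.
#   downscale_height_and_width(768, 768) == (96, 96)    # 768 is divisible by 8**2
#   downscale_height_and_width(769, 769) == (104, 104)  # rounded up to the next multiple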
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
_UpperCAmelCase : Any = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_UpperCAmelCase : str = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase : Dict = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
_UpperCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , A_ , A_ , A_ , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ):
'''simple docstring'''
_UpperCAmelCase : str = self._execution_device
_UpperCAmelCase : Tuple = guidance_scale > 1.0
if isinstance(A_ , A_ ):
_UpperCAmelCase : Union[str, Any] = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Dict = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Any = torch.cat(A_ , dim=0 )
_UpperCAmelCase : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Tuple = hint.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
_UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
_UpperCAmelCase : Dict = self.scheduler.timesteps
_UpperCAmelCase : Union[str, Any] = self.movq.config.latent_channels
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Optional[Any] = {"image_embeds": image_embeds, "hint": hint}
_UpperCAmelCase : Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = variance_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
_UpperCAmelCase : Optional[Any] = self.movq.decode(A_ , force_not_quantize=A_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 189
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = mode.value
lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , F"{mode}.txt" )
lowerCAmelCase__ :List[str] = 1
lowerCAmelCase__ :Union[str, Any] = []
with open(__UpperCAmelCase , encoding='utf-8' ) as f:
lowerCAmelCase__ :str = []
lowerCAmelCase__ :Dict = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
guid_index += 1
lowerCAmelCase__ :Tuple = []
lowerCAmelCase__ :List[str] = []
else:
lowerCAmelCase__ :List[str] = line.split(' ' )
words.append(splits[0] )
if len(__UpperCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
return examples
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__UpperCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ :Optional[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__UpperCAmelCase )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , 'r' ) as f:
lowerCAmelCase__ :Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ :Union[str, Any] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , 'r' ) as f:
lowerCAmelCase__ :str = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ :Optional[Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Union[str, Any] = mode.value
lowerCAmelCase__ :Union[str, Any] = os.path.join(__UpperCAmelCase , F"{mode}.txt" )
lowerCAmelCase__ :Any = 1
lowerCAmelCase__ :Optional[Any] = []
with open(__UpperCAmelCase , encoding='utf-8' ) as f:
for sentence in parse_incr(__UpperCAmelCase ):
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :Dict = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
guid_index += 1
return examples
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = 0
for sentence in parse_incr(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[int] = preds_list[example_id]
lowerCAmelCase__ :Tuple = ''
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(__UpperCAmelCase )
example_id += 1
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 293
| 1
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
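

if __name__ == "__main__":
    # Small demonstration (added for illustration) of the operations defined above.
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
    q = Polynomial(1, [0, 1])  # x
    print(p + q)  # 3x^2 + 3x + 1
    print(p.derivative())  # 6x + 2
    print(p.evaluate(2))  # 1 + 2*2 + 3*4 = 17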
| 81
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 81
| 1
|
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None
@classmethod
def a_ ( cls : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
A__ = prefix
A__ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ) -> str:
"""simple docstring"""
if len(__lowerCAmelCase ) == 0:
return ""
A__ = None
if any(char.isdigit() for char in word ):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__lowerCAmelCase ) + 1 ):
A__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__lowerCAmelCase : List[str] ):
A__ = """"""
while integer != 0:
A__ = chr(ord("""A""" ) + integer % 10 ) + s
integer //= 10
return s
A__ = 0
while True:
A__ = word + """#""" + int_to_alphabetic(__lowerCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
A__ = sword
break
A__ = short_word
A__ = word
return short_word
@staticmethod
def a_ ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
A__ = param_name.split("""_""" )
A__ = [TrialShortNamer.shortname_for_word(__lowerCAmelCase , __lowerCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
A__ = ["""""", """_"""]
for separator in separators:
A__ = separator.join(__lowerCAmelCase )
if shortname not in info["reverse_short_param"]:
A__ = shortname
A__ = param_name
return shortname
return param_name
@staticmethod
def a_ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = TrialShortNamer.shortname_for_key(__lowerCAmelCase , __lowerCAmelCase )
A__ = short_name
A__ = param_name
@classmethod
def a_ ( cls : Dict ) -> Tuple:
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
A__ = {
"""short_word""": {},
"""reverse_short_word""": {},
"""short_param""": {},
"""reverse_short_param""": {},
}
A__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowerCAmelCase , __lowerCAmelCase )
A__ = info
@classmethod
def a_ ( cls : str , __lowerCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
A__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A__ = cls.NAMING_INFO["""short_param"""][k]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A__ = 1 if v else 0
A__ = """""" if isinstance(__lowerCAmelCase , (int, float) ) else """-"""
A__ = f'{key}{sep}{v}'
name.append(__lowerCAmelCase )
return "_".join(__lowerCAmelCase )
@classmethod
def a_ ( cls : List[str] , __lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
A__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A__ = []
else:
A__ = repr.split("""_""" )
A__ = {}
for value in values:
if "-" in value:
A__ , A__ = value.split("""-""" )
else:
A__ = re.sub("""[0-9.]""" , """""" , __lowerCAmelCase )
A__ = float(re.sub("""[^0-9.]""" , """""" , __lowerCAmelCase ) )
A__ = cls.NAMING_INFO["""reverse_short_param"""][p_k]
A__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A__ = cls.DEFAULTS[k]
return parameters
| 274
|
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
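

# Sanity check (added for illustration): the metric simply forwards to
# scikit-learn, so a direct call to f1_score reproduces the docstring's Example 1.
if __name__ == "__main__":
    print(f1_score([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]))  # 0.5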
| 274
| 1
|
class Graph:
    def __init__(self):
        self.vertex = {}

    # for printing the Graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 352
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
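

# Example (illustrative, added for reference): the default configuration reproduces
# the 90M-parameter checkpoint's geometry.
# >>> config = BlenderbotSmallConfig()
# >>> (config.d_model, config.encoder_layers, config.vocab_size)
# (512, 8, 50265)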
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def __A ( self : str ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ = {0: "batch"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __A ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super().outputs
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
for i in range(__magic_name__ ):
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __A ( self : int , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE_ = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE_ = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = decoder_seq_length + 3
SCREAMING_SNAKE_CASE_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = []
            # If the model configuration specifies both encoder and decoder layer counts, both are considered
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ = min(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = max(__magic_name__ , __magic_name__ ) - min_num_layers
SCREAMING_SNAKE_CASE_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __A ( self : Union[str, Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_ = seqlen + 2
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_layers
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.num_attention_heads
SCREAMING_SNAKE_CASE_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE_ = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
SCREAMING_SNAKE_CASE_ = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __A ( self : Dict , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ = tokenizer.num_special_tokens_to_add(__magic_name__ )
SCREAMING_SNAKE_CASE_ = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : PreTrainedTokenizer , __magic_name__ : int = -1 , __magic_name__ : int = -1 , __magic_name__ : bool = False , __magic_name__ : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[str] ) -> List[str]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE_ = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
SCREAMING_SNAKE_CASE_ = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
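if __name__ == "__main__":
    # Shape sketch (illustrative addition, assumes PyTorch is installed; the
    # concrete numbers are made up): the dummy past_key_values built above are
    # zero tensors of shape (batch, num_heads, past_seq_len, hidden_size // num_heads).
    import torch

    key = torch.zeros(2, 16, 11, 512 // 16)
    assert key.shape == (2, 16, 11, 32)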
| 305
| 0
|
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )

    return is_binary_search_tree_recursive_check(tree , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
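    # Minimal usage sketch (illustrative addition; the tree values are made
    # up, not from the original file):
    #        2.0
    #       /   \
    #     1.0   3.0
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)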
| 52
|
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__A = logging.getLogger(__name__)
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase=-1 ):
'''simple docstring'''
lowerCAmelCase__ :Dict = label_idx
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = mode.value
lowerCAmelCase__ :List[str] = os.path.join(__UpperCAmelCase , F"{mode}.txt" )
lowerCAmelCase__ :List[str] = 1
lowerCAmelCase__ :Union[str, Any] = []
with open(__UpperCAmelCase , encoding='utf-8' ) as f:
lowerCAmelCase__ :str = []
lowerCAmelCase__ :Dict = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
guid_index += 1
lowerCAmelCase__ :Tuple = []
lowerCAmelCase__ :List[str] = []
else:
lowerCAmelCase__ :List[str] = line.split(' ' )
words.append(splits[0] )
if len(__UpperCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
return examples
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__UpperCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase__ :Optional[Any] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__UpperCAmelCase )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , 'r' ) as f:
lowerCAmelCase__ :Any = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ :Union[str, Any] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , 'r' ) as f:
lowerCAmelCase__ :str = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase__ :Optional[Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _lowerCAmelCase ( a ):
"""simple docstring"""
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Union[str, Any] = mode.value
lowerCAmelCase__ :Union[str, Any] = os.path.join(__UpperCAmelCase , F"{mode}.txt" )
lowerCAmelCase__ :Any = 1
lowerCAmelCase__ :Optional[Any] = []
with open(__UpperCAmelCase , encoding='utf-8' ) as f:
for sentence in parse_incr(__UpperCAmelCase ):
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :Dict = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__UpperCAmelCase , labels=__UpperCAmelCase ) )
guid_index += 1
return examples
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = 0
for sentence in parse_incr(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[int] = preds_list[example_id]
lowerCAmelCase__ :Tuple = ''
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(__UpperCAmelCase )
example_id += 1
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if path:
with open(__UpperCAmelCase , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
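if __name__ == "__main__":
    # Format sketch (illustrative addition, not part of the original module):
    # the readers above consume CoNLL-style lines of "token <columns> label",
    # with NER reading column label_idx=-1 and chunking reading label_idx=-2.
    splits = "EU NNP B-NP B-ORG".split(" ")
    assert splits[0] == "EU"      # the token
    assert splits[-1] == "B-ORG"  # NER label (label_idx = -1)
    assert splits[-2] == "B-NP"   # chunk label (label_idx = -2)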
| 293
| 0
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


if __name__ == "__main__":
    print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
    print('''\n*** Enter -1 at any time to quit ***''')
    print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('''\n********* Goodbye!! ************''')
                break
            else:
                print(F"""The Catalan numbers from 0 through {N} are:""")
                print(catalan_numbers(N))
                print('''Try another upper limit for the sequence: ''', end='''''')
    except (NameError, ValueError):
        print('''\n********* Invalid input, goodbye! ************\n''')

    import doctest

    doctest.testmod()
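    # Cross-check (illustrative addition, runs after the interactive loop):
    # the DP table must agree with the closed form C(n) = C(2n, n) / (n + 1).
    from math import comb

    assert catalan_numbers(10) == [comb(2 * n, n) // (n + 1) for n in range(11)]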
| 204
|
def selection_sort(collection: list) -> list:
    """simple docstring"""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1 , length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
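    # Sanity check (illustrative addition): the result must agree with the
    # built-in sorted() on a fixed example.
    assert selection_sort([3, 1, 4, 1, 5, 9, 2, 6]) == sorted([3, 1, 4, 1, 5, 9, 2, 6])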
| 204
| 1
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding),
        )
    return max(max_including , max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
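    # Worked example (illustrative addition): for [2, 7, 9, 3, 1] the best
    # non-adjacent picks are 2 + 9 + 1 = 12.
    assert maximum_non_adjacent_sum([2, 7, 9, 3, 1]) == 12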
| 28
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 152
| 0
|
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(binary_insertion_sort(unsorted))
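    # Sanity check (illustrative addition): the binary-search insertion must
    # agree with the built-in sort on a fixed example.
    assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]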
| 355
|
'''simple docstring'''
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
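    # Spot check (illustrative addition): 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
    assert count_divisors(28) == 6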
| 9
| 0
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 100
|
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase_ : Union[str, Any] = pytest.mark.integration
UpperCAmelCase_ : List[Any] = {'comet'}
UpperCAmelCase_ : int = importlib.util.find_spec('fairseq') is not None
UpperCAmelCase_ : Optional[Any] = {'code_eval'}
UpperCAmelCase_ : Optional[int] = os.name == 'nt'
UpperCAmelCase_ : Dict = {'bertscore', 'frugalscore', 'perplexity'}
UpperCAmelCase_ : Dict = importlib.util.find_spec('transformers') is not None
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE__ )
def wrapper(self , SCREAMING_SNAKE_CASE__ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE__ )
return wrapper
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE__ )
def wrapper(self , SCREAMING_SNAKE_CASE__ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE__ )
return wrapper
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@wraps(SCREAMING_SNAKE_CASE__ )
def wrapper(self , SCREAMING_SNAKE_CASE__ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , SCREAMING_SNAKE_CASE__ )
return wrapper
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_snake_case , _snake_case , _snake_case )
@local
class lowercase__ ( parameterized.TestCase ):
'''simple docstring'''
A_ : Optional[int] = {}
A_ : Union[str, Any] = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : str = """[...]"""
_SCREAMING_SNAKE_CASE : Any = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path )
_SCREAMING_SNAKE_CASE : Optional[int] = datasets.load.import_main_class(metric_module.__name__ , dataset=__snake_case )
# check parameters
_SCREAMING_SNAKE_CASE : Tuple = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__snake_case , metric_module.__name__ ):
with self.use_local_metrics():
try:
_SCREAMING_SNAKE_CASE : int = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : List[Any] = """[...]"""
_SCREAMING_SNAKE_CASE : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , __snake_case ) ).module_path )
# run doctest
with self.use_local_metrics():
_SCREAMING_SNAKE_CASE : List[str] = doctest.testmod(__snake_case , verbose=__snake_case , raise_on_error=__snake_case )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__snake_case ):
yield
else:
yield
@contextmanager
def UpperCAmelCase_ ( self ):
def load_local_metric(__snake_case , *__snake_case , **__snake_case ):
return load_metric(os.path.join("""metrics""" , __snake_case ) , *__snake_case , **__snake_case )
with patch("""datasets.load_metric""" ) as mock_load_metric:
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_local_metric
yield
@classmethod
def UpperCAmelCase_ ( cls , __snake_case ):
def wrapper(__snake_case ):
_SCREAMING_SNAKE_CASE : Any = contextmanager(__snake_case )
_SCREAMING_SNAKE_CASE : int = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class lowercase__ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self , __snake_case ):
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
_SCREAMING_SNAKE_CASE : Any = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE__ ) )
    # mock get_model, which would otherwise download a bert model
    # mock bert_cos_score_idf, which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
_SCREAMING_SNAKE_CASE : Any = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def load_from_checkpoint(SCREAMING_SNAKE_CASE__ ):
class lowercase__ :
'''simple docstring'''
def UpperCAmelCase_ ( self , __snake_case , *__snake_case , **__snake_case ):
assert len(__snake_case ) == 2
_SCREAMING_SNAKE_CASE : Dict = [0.19, 0.92]
return scores, sum(__snake_case ) / len(__snake_case )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a real model
with patch("""comet.download_model""" ) as mock_download_model:
_SCREAMING_SNAKE_CASE : Any = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
_SCREAMING_SNAKE_CASE : List[str] = load_from_checkpoint
yield
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
_SCREAMING_SNAKE_CASE : List[str] = """ERROR"""
_SCREAMING_SNAKE_CASE : Tuple = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(SCREAMING_SNAKE_CASE__ , match=re.escape(SCREAMING_SNAKE_CASE__ ) ):
metric.compute(predictions=[] , references=[] , scheme=SCREAMING_SNAKE_CASE__ )
| 200
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase__ : str , lowercase__ : List[Any]=7 , lowercase__ : Optional[int]=3 , lowercase__ : Tuple=18 , lowercase__ : Optional[int]=30 , lowercase__ : int=400 , lowercase__ : Union[str, Any]=True , lowercase__ : Tuple=None , lowercase__ : int=True , ):
'''simple docstring'''
lowerCAmelCase__ = size if size is not None else {'height': 18, 'width': 18}
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = image_size
lowerCAmelCase__ = min_resolution
lowerCAmelCase__ = max_resolution
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = do_normalize
def __snake_case ( self : Dict):
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class a_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = ImageGPTImageProcessor if is_vision_available() else None
def __snake_case ( self : Any):
'''simple docstring'''
lowerCAmelCase__ = ImageGPTImageProcessingTester(self)
@property
def __snake_case ( self : str):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , 'clusters'))
self.assertTrue(hasattr(lowercase__ , 'do_resize'))
self.assertTrue(hasattr(lowercase__ , 'size'))
self.assertTrue(hasattr(lowercase__ , 'do_normalize'))
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict)
lowerCAmelCase__ = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def __snake_case ( self : Tuple):
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ = os.path.join(lowercase__ , 'image_processor.json')
image_processor_first.to_json_file(lowercase__)
lowerCAmelCase__ = self.image_processing_class.from_json_file(lowercase__).to_dict()
lowerCAmelCase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
lowerCAmelCase__ = self.image_processing_class.from_pretrained(lowercase__).to_dict()
lowerCAmelCase__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('ImageGPT requires clusters at initialization')
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
pass
def __lowerCamelCase ( ):
lowerCAmelCase__ = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
lowerCAmelCase__ = Image.open(dataset[4]['file'] )
lowerCAmelCase__ = Image.open(dataset[5]['file'] )
lowerCAmelCase__ = [imagea, imagea]
return images
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
lowerCAmelCase__ = prepare_images()
# test non-batched
lowerCAmelCase__ = image_processing(images[0] , return_tensors='pt')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_024))
lowerCAmelCase__ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
lowerCAmelCase__ = image_processing(lowercase__ , return_tensors='pt')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_024))
lowerCAmelCase__ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 119
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class a_ :
'''simple docstring'''
UpperCAmelCase_ = None
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = None
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
UpperCAmelCase_ = False
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def __snake_case ( self : Dict):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 119
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''lilt'''
def __init__( self : Tuple , lowerCAmelCase__ : Tuple=3_0_5_2_2 , lowerCAmelCase__ : int=7_6_8 , lowerCAmelCase__ : Optional[int]=1_2 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : Optional[int]=3_0_7_2 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : Optional[Any]=1e-12 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : int="absolute" , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Optional[int]=1_0_2_4 , **lowerCAmelCase__ : Dict , ) -> Tuple:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : int = intermediate_size
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : str = type_vocab_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : List[Any] = layer_norm_eps
_UpperCAmelCase : int = position_embedding_type
_UpperCAmelCase : Any = classifier_dropout
_UpperCAmelCase : List[Any] = channel_shrink_ratio
_UpperCAmelCase : Optional[int] = max_ad_position_embeddings
| 145
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase__ ).to(lowerCAmelCase__ )
_UpperCAmelCase : str = AutoTokenizer.from_pretrained("google/mt5-small" )
_UpperCAmelCase : str = tokenizer("Hello there" , return_tensors="pt" ).input_ids
_UpperCAmelCase : str = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
_UpperCAmelCase : Any = model(input_ids.to(lowerCAmelCase__ ) , labels=labels.to(lowerCAmelCase__ ) ).loss
_UpperCAmelCase : Dict = -(labels.shape[-1] * loss.item())
_UpperCAmelCase : Any = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 145
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowercase : int = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self :List[str] , a :Any , a :Tuple=7 , a :Any=3 , a :Tuple=1_8 , a :List[str]=3_0 , a :List[str]=4_0_0 , a :Dict=None , a :Tuple=True , a :List[str]=True , a :Any=None , ) -> List[Any]:
__UpperCamelCase : int = size if size is not None else {"height": 2_0, "width": 2_0}
__UpperCamelCase : Dict = parent
__UpperCamelCase : List[Any] = batch_size
__UpperCamelCase : str = num_channels
__UpperCamelCase : Optional[Any] = image_size
__UpperCamelCase : str = min_resolution
__UpperCamelCase : Tuple = max_resolution
__UpperCamelCase : int = size
__UpperCamelCase : Any = do_normalize
__UpperCamelCase : int = do_convert_rgb
__UpperCamelCase : int = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
__UpperCamelCase : str = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def _lowerCamelCase ( self :Optional[Any] ) -> int:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _lowerCamelCase ( self :Dict ) -> Optional[int]:
__UpperCamelCase : List[str] = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
__UpperCamelCase : Dict = Image.open(requests.get(a , stream=a ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = PixaStructImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[Any]:
__UpperCamelCase : Dict = PixaStructImageProcessingTester(self )
@property
def _lowerCamelCase ( self :Any ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self :Optional[int] ) -> Optional[Any]:
__UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_convert_rgb" ) )
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[Any]:
__UpperCamelCase : str = self.image_processor_tester.prepare_dummy_image()
__UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
__UpperCamelCase : List[Any] = 2_0_4_8
__UpperCamelCase : str = image_processor(a , return_tensors="pt" , max_patches=a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def _lowerCamelCase ( self :Union[str, Any] ) -> List[Any]:
# Initialize image_processor
__UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
__UpperCamelCase : Optional[Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__UpperCamelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__UpperCamelCase : List[str] = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCamelCase ( self :Union[str, Any] ) -> List[str]:
# Initialize image_processor
__UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
__UpperCamelCase : List[str] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
__UpperCamelCase : Any = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a ):
__UpperCamelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
__UpperCamelCase : List[Any] = "Hello"
__UpperCamelCase : List[str] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a , header_text=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__UpperCamelCase : List[str] = image_processor(
a , return_tensors="pt" , max_patches=a , header_text=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
# Initialize image_processor
__UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
__UpperCamelCase : Dict = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__UpperCamelCase : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__UpperCamelCase : str = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _lowerCamelCase ( self :Tuple ) -> Union[str, Any]:
# Initialize image_processor
__UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
__UpperCamelCase : Dict = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__UpperCamelCase : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__UpperCamelCase : Tuple = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = PixaStructImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self :Union[str, Any] ) -> int:
__UpperCamelCase : Tuple = PixaStructImageProcessingTester(self , num_channels=4 )
__UpperCamelCase : Union[str, Any] = 3
@property
def _lowerCamelCase ( self :List[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self :Tuple ) -> int:
__UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_convert_rgb" ) )
def _lowerCamelCase ( self :Tuple ) -> str:
# Initialize image_processor
__UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
__UpperCamelCase : Tuple = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__UpperCamelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__UpperCamelCase : int = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 362
|
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Measuring any one qubit now collapses the other qubits into the
    # same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"Total count for various states are: {quantum_entanglement(3)}")
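    # Sanity check (illustrative addition; assumes qiskit with the Aer
    # simulator is installed): a GHZ state only ever collapses to all-zeros
    # or all-ones.
    assert set(quantum_entanglement(3)) <= {"000", "111"}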
| 151
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__UpperCAmelCase : Any = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__UpperCAmelCase : List[Any] = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
__UpperCAmelCase : int = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : int ):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCAmelCase__ ( self : Optional[int] , A : int , A : Any , A : int = CHRF.CHAR_ORDER , A : int = CHRF.WORD_ORDER , A : int = CHRF.BETA , A : bool = False , A : bool = False , A : bool = False , ):
__snake_case: Any = len(references[0] )
if any(len(A ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__snake_case: Any = [[refs[i] for refs in references] for i in range(A )]
__snake_case: int = CHRF(A , A , A , A , A , A )
__snake_case: List[Any] = sb_chrf.corpus_score(A , A )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 111
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : str = Dict[str, Any]
__UpperCAmelCase : int = List[Prediction]
@add_end_docstrings(__lowerCamelCase )
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : int , *A : Optional[int] , **A : Optional[int] ):
super().__init__(*A , **A )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def UpperCAmelCase__ ( self : List[str] , **A : Tuple ):
__snake_case: List[str] = {}
if "threshold" in kwargs:
__snake_case: Optional[Any] = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : int , *A : Optional[Any] , **A : Tuple ):
return super().__call__(*A , **A )
def UpperCAmelCase__ ( self : Optional[int] , A : str ):
__snake_case: Optional[Any] = load_image(A )
__snake_case: Dict = torch.IntTensor([[image.height, image.width]] )
__snake_case: str = self.image_processor(images=[image] , return_tensors="""pt""" )
if self.tokenizer is not None:
__snake_case: Optional[Any] = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
__snake_case: Any = target_size
return inputs
def UpperCAmelCase__ ( self : Optional[int] , A : Dict ):
__snake_case: int = model_inputs.pop("""target_size""" )
__snake_case: int = self.model(**A )
__snake_case: Any = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
__snake_case: Optional[int] = model_inputs["""bbox"""]
return model_outputs
def UpperCAmelCase__ ( self : List[Any] , A : Optional[int] , A : Union[str, Any]=0.9 ):
__snake_case: Optional[Any] = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__snake_case , __snake_case: Union[str, Any] = target_size[0].tolist()
def unnormalize(A : Tuple ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_000),
(height * bbox[1] / 1_000),
(width * bbox[2] / 1_000),
(height * bbox[3] / 1_000),
] ) )
__snake_case , __snake_case: Optional[int] = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__snake_case: List[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__snake_case: int = [unnormalize(A ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
__snake_case: int = ["""score""", """label""", """box"""]
__snake_case: List[Any] = [dict(zip(A , A ) ) for vals in zip(scores.tolist() , A , A ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__snake_case: Tuple = self.image_processor.post_process_object_detection(A , A , A )
__snake_case: Optional[Any] = raw_annotations[0]
__snake_case: int = raw_annotation["""scores"""]
__snake_case: int = raw_annotation["""labels"""]
__snake_case: Optional[Any] = raw_annotation["""boxes"""]
__snake_case: Union[str, Any] = scores.tolist()
__snake_case: List[str] = [self.model.config.idalabel[label.item()] for label in labels]
__snake_case: List[str] = [self._get_bounding_box(A ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__snake_case: List[Any] = ["""score""", """label""", """box"""]
__snake_case: Dict = [
dict(zip(A , A ) )
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
]
return annotation
def UpperCAmelCase__ ( self : Optional[Any] , A : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
__snake_case , __snake_case , __snake_case , __snake_case: Union[str, Any] = box.int().tolist()
__snake_case: Optional[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
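# Illustrative usage (added; the checkpoint name is an assumption, not part of
# this file):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., ...}}, ...]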
| 111
| 1
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
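    # Illustrative spot-check (assumes the names fixed above): the cheapest E -> F
    # route in graph_fwd is E -> G -> F with total cost 3.
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3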
| 101
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""", backend="""nccl""", )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
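# Minimal usage sketch (an assumption, not part of the original module): the helpers
# above expect an argparse-style namespace carrying the attributes they read.
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(n_gpu=0, seed=42)
#   init_gpu_params(params)  # CPU-only path: sets is_master=True, multi_gpu=False
#   set_seed(params)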
| 101
| 1
|
"""simple docstring"""
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 16
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
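# Standalone usage sketch (an assumption, not part of the test file):
#
#   scheduler = DDPMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   scheduler.timesteps[:3]  # tensor([980, 960, 940]) with the default 1000//50 spacing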
| 321
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
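# Instantiation sketch (illustrative): the defaults above mirror the
# SCUT-DLVCLab/lilt-roberta-en-base checkpoint referenced in the archive map.
#
#   config = LiltConfig()
#   config.model_type  # "lilt"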
| 369
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
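# Instantiation sketch (illustrative): with the defaults above,
#
#   config = SwinConfig()
#   config.hidden_size  # 96 * 2**3 == 768, the channel width after the last stage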
| 254
| 0
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init-weights hook: timm handles its own initialization.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
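# Usage sketch (an assumption, not part of the original file; requires `timm`
# and assumes TimmBackboneConfig is importable from transformers):
#
#   from transformers import TimmBackboneConfig
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   backbone(torch.randn(1, 3, 224, 224)).feature_maps[-1].shape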
| 75
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 241
| 0
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
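    # Sanity note (illustrative): the real root of x**3 - 2*x - 5 is about
    # 2.0945515, so intersection(f, 3, 3.5) converges to roughly that value.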
| 274
|
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 274
| 1
|
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'''{solution() = }''')
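    # Reference value (Project Euler #7): solution() == 104743, the 10001st prime.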
| 64
|
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
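    # Deterministic spot-check (illustrative): the best window of width 4 in
    # [1, 4, 2, 10, 2, 3, 1, 0, 20] is [3, 1, 0, 20], summing to 24.
    assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24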
| 64
| 1
|
"""simple docstring"""
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
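    # Non-interactive sketch (illustrative): quick_sort_random sorts in place, e.g.
    # data = [5, 1, 4, 2, 3]; quick_sort_random(data, 0, len(data)); data == [1, 2, 3, 4, 5]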
| 362
|
"""simple docstring"""
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 58
| 0
|
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)

    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
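    # Deterministic spot-check (illustrative): the six 2-element combinations of 1..4.
    assert total_list == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]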
| 241
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
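# Shape note (illustrative): `cosine_distance` maps (batch, dim) image embeddings and
# (concepts, dim) concept embeddings to a (batch, concepts) similarity matrix:
#
#   cosine_distance(torch.randn(2, 8), torch.randn(3, 8)).shape  # torch.Size([2, 3])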
| 9
| 0
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
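    # Reference value (Project Euler #26, illustrative): solution() == 983, the
    # denominator below 1000 with the longest recurring decimal cycle.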
| 257
|
ROMAN = [
(10_00, "M"),
(9_00, "CM"),
(5_00, "D"),
(4_00, "CD"),
(1_00, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
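    # Round-trip spot-check (illustrative):
    assert roman_to_int("MCMXCIV") == 1994
    assert int_to_roman(1994) == "MCMXCIV"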
| 257
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(idx)-1}""")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(idx)-1}""")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"""block{idx}""", f"""block.{int(idx)-1}""")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(idx)-1}""")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""")
            kv_bias = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""")
            # next, add keys and values (in that order) to the state dict
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(F"""Unknown model name: {model_name}""")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1E-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
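    # Example invocation (illustrative; the script name and paths are placeholders):
    #
    #   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
    #       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti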
| 119
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 119
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 123
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
def UpperCamelCase ( _lowerCamelCase : Tuple ):
A__ = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
A__ = 10_24
A__ = 40_96
A__ = 24
A__ = 16
A__ = [5, 11, 17, 23]
A__ = [2_56, 5_12, 10_24, 10_24]
A__ = (1, 3_84, 3_84)
if "nyu" or "midas" in checkpoint_url:
A__ = 7_68
A__ = [1, 1, 1, 0.5]
A__ = [2_56, 5_12, 7_68, 7_68]
A__ = 1_50
A__ = 16
A__ = (1, 3_84, 3_84)
A__ = False
A__ = "project"
if "ade" in checkpoint_url:
A__ = True
A__ = 7_68
A__ = [1, 1, 1, 0.5]
A__ = 1_50
A__ = 16
A__ = "huggingface/label-files"
A__ = "ade20k-id2label.json"
A__ = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
A__ = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def UpperCamelCase ( _lowerCamelCase : Optional[Any] ):
A__ = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : int ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A__ = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
A__ = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
A__ = name.replace("patch_embed" , "" )
if "pos_embed" in name:
A__ = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
A__ = name.replace("proj" , "projection" )
if "blocks" in name:
A__ = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
A__ = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
A__ = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
A__ = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
A__ = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
A__ = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
A__ = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
A__ = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A__ = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
A__ = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
A__ = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
A__ = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
A__ = name.replace("conv1" , "convolution1" )
if "conv2" in name:
A__ = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A__ = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
A__ = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
A__ = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
A__ = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A__ = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
A__ = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
A__ = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
A__ = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
A__ = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
A__ = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
A__ = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
A__ = name.replace("pretrained" , "dpt" )
if "bn" in name:
A__ = name.replace("bn" , "batch_norm" )
if "head" in name:
A__ = name.replace("head" , "head.head" )
if "encoder.norm" in name:
A__ = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
A__ = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
A__ = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
A__ = name.replace(".." , "." )
if "stem.conv" in name:
A__ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
A__ = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
A__ = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
A__ = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
A__ = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
A__ = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
A__ = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
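# Illustrative trace of the renaming chain above (hedged, worked out by hand):
#   "pretrained.model.blocks.0.attn.proj.weight"
#     -> "dpt.encoder.blocks.0.attn.proj.weight"                ("pretrained.model" -> "dpt.encoder")
#     -> "dpt.encoder.blocks.0.attention.output.dense.weight"   ("attn.proj" -> "attention.output.dense")
#     -> "dpt.encoder.layer.0.attention.output.dense.weight"    ("blocks" -> "layer")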
def UpperCamelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
A__ = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
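# Minimal sketch of the q/k/v split performed above (hedged: toy sizes, not a real
# DPT config). The fused in_proj weight of shape (3 * hidden, hidden) is cut into
# three (hidden, hidden) blocks in query/key/value order:
#   import torch
#   hidden = 4
#   fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   query, key, value = fused[:hidden, :], fused[hidden : 2 * hidden, :], fused[-hidden:, :]
#   assert torch.equal(torch.cat([query, key, value]), fused)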
def UpperCamelCase ( ):
A__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : str ):
A__, A__ = get_dpt_config(_lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
A__ = torch.load(_lowerCamelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(_lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
A__ = state_dict.pop(_lowerCamelCase )
A__ = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
A__ = DPTForSemanticSegmentation(_lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# Check outputs on an image
A__ = 4_80 if "ade" in checkpoint_url else 3_84
A__ = DPTImageProcessor(size=_lowerCamelCase )
A__ = prepare_img()
A__ = image_processor(_lowerCamelCase , return_tensors="pt" )
# forward pass
A__ = model(**_lowerCamelCase ).logits if "ade" in checkpoint_url else model(**_lowerCamelCase ).predicted_depth
if show_prediction:
A__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=_lowerCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
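# Example invocation (illustrative; the script filename is an assumption):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --show_prediction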
| 123
| 1
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = '''hf-internal-testing/tiny-random-bert'''
__snake_case = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
__snake_case = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int =cached_file(A_ , A_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A_ , A_ ) ) )
with open(os.path.join(A_ , '''refs''' , '''main''' ) ) as f:
UpperCAmelCase : Tuple =f.read()
self.assertEqual(A_ , os.path.join(A_ , '''snapshots''' , A_ , A_ ) )
self.assertTrue(os.path.isfile(A_ ) )
# File is cached at the same place the second time.
UpperCAmelCase : int =cached_file(A_ , A_ )
self.assertEqual(A_ , A_ )
# Using a specific revision to test the full commit hash.
UpperCAmelCase : List[str] =cached_file(A_ , A_ , revision='''9b8c223''' )
self.assertEqual(A_ , os.path.join(A_ , '''snapshots''' , A_ , A_ ) )
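    # Illustrative cache layout exercised above (hedged sketch of the hub cache):
    #   models--hf-internal-testing--tiny-random-bert/
    #       blobs/<sha of each downloaded file>
    #       refs/main                             # text file holding the commit hash
    #       snapshots/<commit hash>/config.json   # symlink into blobs/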
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(A_ , '''is not a valid model identifier''' ):
UpperCAmelCase : Any =cached_file('''tiny-random-bert''' , A_ )
with self.assertRaisesRegex(A_ , '''is not a valid git identifier''' ):
UpperCAmelCase : List[Any] =cached_file(A_ , A_ , revision='''aaaa''' )
with self.assertRaisesRegex(A_ , '''does not appear to have a file named''' ):
UpperCAmelCase : str =cached_file(A_ , '''conf''' )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
with self.assertRaisesRegex(A_ , '''does not appear to have a file named''' ):
UpperCAmelCase : Dict =cached_file(A_ , '''conf''' )
with open(os.path.join(A_ , '''refs''' , '''main''' ) ) as f:
UpperCAmelCase : Union[str, Any] =f.read()
self.assertTrue(os.path.isfile(os.path.join(A_ , '''.no_exist''' , A_ , '''conf''' ) ) )
UpperCAmelCase : Union[str, Any] =cached_file(A_ , '''conf''' , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
UpperCAmelCase : Optional[Any] =cached_file(A_ , '''conf''' , local_files_only=A_ , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
UpperCAmelCase : Optional[int] =mock.Mock()
UpperCAmelCase : List[Any] =500
UpperCAmelCase : str ={}
UpperCAmelCase : str =HTTPError
UpperCAmelCase : List[str] ={}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=A_ ) as mock_head:
UpperCAmelCase : List[str] =cached_file(A_ , '''conf''' , _raise_exceptions_for_connection_errors=A_ )
self.assertIsNone(A_ )
        # Check that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A_ ) )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A_ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , A_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A_ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , A_ , revision='''ahaha''' )
UpperCAmelCase : List[Any] =get_file_from_repo('''bert-base-cased''' , A_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
UpperCAmelCase : Any =json.loads(open(A_ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : List[str] =Path(A_ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(A_ , '''a.txt''' ) , str(A_ ) )
self.assertIsNone(get_file_from_repo(A_ , '''b.txt''' ) )
| 348
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__snake_case = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__snake_case = json.load(f)
@require_torch
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int,A_: int ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(A_ )
def snake_case_ ( self: Dict,A_: int ):
'''simple docstring'''
__UpperCamelCase = FSMTForConditionalGeneration.from_pretrained(A_ ).to(A_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 2_6.0],
['ru-en', 2_2.0],
['en-de', 2_2.0],
['de-en', 2_9.0],
] )
@slow
def snake_case_ ( self: Tuple,A_: Any,A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = F'''facebook/wmt19-{pair}'''
__UpperCamelCase = self.get_tokenizer(A_ )
__UpperCamelCase = self.get_model(A_ )
__UpperCamelCase = bleu_data[pair]['src']
__UpperCamelCase = bleu_data[pair]['tgt']
__UpperCamelCase = tokenizer(A_,return_tensors='pt',truncation=A_,padding='longest' ).to(A_ )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids,num_beams=8,)
__UpperCamelCase = tokenizer.batch_decode(
A_,skip_special_tokens=A_,clean_up_tokenization_spaces=A_ )
__UpperCamelCase = calculate_bleu(A_,A_ )
print(A_ )
self.assertGreaterEqual(scores['bleu'],A_ )
| 310
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 39
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''MCTCTFeatureExtractor'''
__SCREAMING_SNAKE_CASE = '''AutoTokenizer'''
def __init__( self,__lowerCamelCase,__lowerCamelCase ):
super().__init__(__lowerCamelCase,__lowerCamelCase )
A__ = self.feature_extractor
A__ = False
def __call__( self,*__lowerCamelCase,**__lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase,**__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
A__ = kwargs.pop('''raw_speech''' )
else:
A__ = kwargs.pop('''audio''',__lowerCamelCase )
A__ = kwargs.pop('''sampling_rate''',__lowerCamelCase )
A__ = kwargs.pop('''text''',__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
A__ = args[0]
A__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
A__ = self.feature_extractor(__lowerCamelCase,*__lowerCamelCase,sampling_rate=__lowerCamelCase,**__lowerCamelCase )
if text is not None:
A__ = self.tokenizer(__lowerCamelCase,**__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A__ = encodings['''input_ids''']
return inputs
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
return self.tokenizer.batch_decode(*__lowerCamelCase,**__lowerCamelCase )
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCamelCase,**__lowerCamelCase )
A__ = kwargs.pop('''input_features''',__lowerCamelCase )
A__ = kwargs.pop('''labels''',__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
A__ = args[0]
A__ = args[1:]
if input_features is not None:
A__ = self.feature_extractor.pad(__lowerCamelCase,*__lowerCamelCase,**__lowerCamelCase )
if labels is not None:
A__ = self.tokenizer.pad(__lowerCamelCase,**__lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A__ = labels['''input_ids''']
return input_features
def UpperCamelCase ( self,*__lowerCamelCase,**__lowerCamelCase ):
return self.tokenizer.decode(*__lowerCamelCase,**__lowerCamelCase )
@contextmanager
def UpperCamelCase ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
A__ = True
A__ = self.tokenizer
yield
A__ = self.feature_extractor
A__ = False
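# Illustrative usage of the processor above (hedged; the checkpoint name and the
# public `MCTCTProcessor` class name are assumptions not shown in this file):
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="hello world", return_tensors="pt")
#   # `inputs` then carries the feature-extractor outputs plus a `labels` entry
#   # holding the tokenized text, matching the `__call__` logic above.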
| 39
| 1
|
import cmath
import math
def _A ( voltage : float , current : float , voltage_angle : float , current_angle : float ) -> complex:
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle )
    current_angle = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle )
    current_rect = cmath.rect(current , current_angle )
    # Calculate apparent power
    return voltage_rect * current_rect
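# Worked example (illustrative values): 100 V at 30 degrees times 3 A at -70 degrees
# gives 300 VA at -40 degrees, i.e. approximately (229.81 - 192.84j):
#   _A(100, 3, 30, -70)  # ~ (229.81-192.84j)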
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[0, 1, 2, 3] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=[1, 384, 24, 24] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ) -> int:
UpperCamelCase :List[Any] = parent
UpperCamelCase :List[str] = batch_size
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :Optional[Any] = patch_size
UpperCamelCase :Optional[Any] = num_channels
UpperCamelCase :Union[str, Any] = is_training
UpperCamelCase :Dict = use_labels
UpperCamelCase :List[Any] = hidden_size
UpperCamelCase :Optional[int] = num_hidden_layers
UpperCamelCase :Any = backbone_out_indices
UpperCamelCase :int = num_attention_heads
UpperCamelCase :Union[str, Any] = intermediate_size
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :Optional[int] = hidden_dropout_prob
UpperCamelCase :int = attention_probs_dropout_prob
UpperCamelCase :Optional[Any] = initializer_range
UpperCamelCase :List[Any] = num_labels
UpperCamelCase :Any = backbone_featmap_shape
UpperCamelCase :Optional[int] = scope
UpperCamelCase :Optional[int] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase :Tuple = (image_size // patch_size) ** 2
UpperCamelCase :int = num_patches + 1
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase :int = None
if self.use_labels:
UpperCamelCase :str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase :Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :Tuple = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=SCREAMING_SNAKE_CASE_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCamelCase :Optional[int] = DPTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Optional[int] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase :Tuple = self.num_labels
UpperCamelCase :Any = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase :int = self.num_labels
UpperCamelCase :str = DPTForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase :Optional[Any] = config_and_inputs
UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowercase, lowercase, unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple =(DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
UpperCamelCase_ : Optional[Any] =(
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : Optional[int] =False
UpperCamelCase_ : Union[str, Any] =False
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[Any] = DPTModelTester(self )
UpperCamelCase :List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase :Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase :Tuple = [*signature.parameters.keys()]
UpperCamelCase :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :int = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
UpperCamelCase :Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase :Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Optional[int]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
UpperCamelCase , UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Union[str, Any] = False
UpperCamelCase :Dict = True
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
UpperCamelCase :Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.train()
UpperCamelCase :List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Dict = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
UpperCamelCase :Tuple = model_class(config=SCREAMING_SNAKE_CASE_ )
# Skip the check for the backbone
UpperCamelCase :List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
UpperCamelCase :Tuple = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase ( self ) -> Tuple:
pass
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
UpperCamelCase :int = DPTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> List[Any]:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
UpperCamelCase , UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase :Optional[Any] = '''add'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :int = DPTForDepthEstimation(SCREAMING_SNAKE_CASE_ )
def _A ( ):
UpperCamelCase :List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :Any = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
UpperCamelCase :int = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Any = prepare_img()
UpperCamelCase :Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCamelCase :Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = outputs.predicted_depth
# verify the predicted depth
UpperCamelCase :List[str] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 259
| 1
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in non-increasing order.
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # array[mid] is the first negative when it is negative and its left
        # neighbour is non-negative.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
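# Equivalent check via the standard library (hedged sketch, not part of the original
# module): each row is sorted in non-increasing order, so the reversed row is
# ascending and `bisect_left(..., 0)` counts its negatives.
#   from bisect import bisect_left
#   row = [4, 3, 2, -1]
#   assert find_negative_index(row) == len(row) - bisect_left(row[::-1], 0)  # both give 3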
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        # Columns are sorted too, so each row's first-negative index can only
        # shrink; reuse the previous bound to narrow the search.
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
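# Worked example (first test grid above): [[4, 3, 2, -1], [3, 2, 1, -1],
# [1, 1, -1, -2], [-1, -1, -2, -3]] contains 1 + 1 + 2 + 4 = 8 negatives,
# so all three counting functions return 8.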
def benchmark() -> None:
    from timeit import timeit

    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 232
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class snake_case_ ( __lowercase ):
A_ = 'gptj'
A_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
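    # With the map above, e.g. `config.hidden_size` transparently reads and writes
    # `config.n_embd` (standard `PretrainedConfig` attribute aliasing).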
def __init__( self : Tuple , _snake_case : Optional[int]=50400 , _snake_case : Union[str, Any]=2048 , _snake_case : Tuple=4096 , _snake_case : Optional[int]=28 , _snake_case : Tuple=16 , _snake_case : Optional[Any]=64 , _snake_case : Optional[int]=None , _snake_case : str="gelu_new" , _snake_case : str=0.0 , _snake_case : Optional[int]=0.0 , _snake_case : List[str]=0.0 , _snake_case : Tuple=1E-5 , _snake_case : List[str]=0.02 , _snake_case : Optional[int]=True , _snake_case : Optional[Any]=50256 , _snake_case : List[str]=50256 , _snake_case : str=False , **_snake_case : Tuple , )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Any = n_positions
__lowerCAmelCase : Optional[Any] = n_embd
__lowerCAmelCase : Optional[int] = n_layer
__lowerCAmelCase : Optional[int] = n_head
__lowerCAmelCase : List[Any] = n_inner
__lowerCAmelCase : List[str] = rotary_dim
__lowerCAmelCase : int = activation_function
__lowerCAmelCase : str = resid_pdrop
__lowerCAmelCase : Union[str, Any] = embd_pdrop
__lowerCAmelCase : Dict = attn_pdrop
__lowerCAmelCase : Optional[Any] = layer_norm_epsilon
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Dict = use_cache
__lowerCAmelCase : List[str] = bos_token_id
__lowerCAmelCase : List[Any] = eos_token_id
super().__init__(
bos_token_id=_snake_case , eos_token_id=_snake_case , tie_word_embeddings=_snake_case , **_snake_case )
class snake_case_ ( __lowercase ):
def __init__( self : Union[str, Any] , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , )->str:
'''simple docstring'''
super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case )
if not getattr(self._config , """pad_token_id""" , _snake_case ):
# TODO: how to do that better?
__lowerCAmelCase : Dict = 0
@property
def UpperCAmelCase__ ( self : Tuple )->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowerCAmelCase : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction="""inputs""" )
__lowerCAmelCase : Dict = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCAmelCase__ ( self : Dict )->int:
'''simple docstring'''
return self._config.n_layer
@property
def UpperCAmelCase__ ( self : str )->int:
'''simple docstring'''
return self._config.n_head
def UpperCAmelCase__ ( self : str , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , )->Mapping[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = super(_snake_case , self ).generate_dummy_inputs(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case )
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase : List[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase : Optional[Any] = seqlen + 2
__lowerCAmelCase : Dict = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase : Tuple = [
(torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(self.num_layers )
]
__lowerCAmelCase : int = common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase : List[str] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 )
return ordered_inputs
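    # Illustrative shape check (hedged, using the GPT-J defaults above: n_head=16,
    # n_embd=4096): with batch=2 and seq_length=5, each (key, value) pair built
    # here is a zero tensor of shape (2, 16, 7, 256), since
    # past_key_values_length = seqlen + 2 and head_dim = 4096 // 16 = 256.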
@property
def UpperCAmelCase__ ( self : str )->int:
'''simple docstring'''
return 13
| 232
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : Tuple = logging.get_logger(__name__)
A_ : int = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class A_ ( _a ):
'''simple docstring'''
a__ = "table-transformer"
a__ = ["past_key_values"]
a__ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(self , lowercase__=True , lowercase__=None , lowercase__=3 , lowercase__=100 , lowercase__=6 , lowercase__=2_048 , lowercase__=8 , lowercase__=6 , lowercase__=2_048 , lowercase__=8 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__="relu" , lowercase__=256 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=1.0 , lowercase__=False , lowercase__="sine" , lowercase__="resnet50" , lowercase__=True , lowercase__=False , lowercase__=1 , lowercase__=5 , lowercase__=2 , lowercase__=1 , lowercase__=1 , lowercase__=5 , lowercase__=2 , lowercase__=0.1 , **lowercase__ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCAmelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(lowercase__ , lowercase__ ):
__UpperCAmelCase = backbone_config.get('''model_type''' )
__UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase = config_class.from_dict(lowercase__ )
# set timm attributes to None
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None, None, None
__UpperCAmelCase = use_timm_backbone
__UpperCAmelCase = backbone_config
__UpperCAmelCase = num_channels
__UpperCAmelCase = num_queries
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = init_xavier_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = auxiliary_loss
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = backbone
__UpperCAmelCase = use_pretrained_backbone
__UpperCAmelCase = dilation
# Hungarian matcher
__UpperCAmelCase = class_cost
__UpperCAmelCase = bbox_cost
__UpperCAmelCase = giou_cost
# Loss coefficients
__UpperCAmelCase = mask_loss_coefficient
__UpperCAmelCase = dice_loss_coefficient
__UpperCAmelCase = bbox_loss_coefficient
__UpperCAmelCase = giou_loss_coefficient
__UpperCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=lowercase__ , **lowercase__ )
@property
def lowerCAmelCase_ (self ) -> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase_ (self ) -> int:
return self.d_model
class A_ ( _a ):
'''simple docstring'''
a__ = version.parse("1.11" )
@property
def lowerCAmelCase_ (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def lowerCAmelCase_ (self ) -> float:
return 1E-5
@property
def lowerCAmelCase_ (self ) -> int:
return 12
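# Illustrative usage (hedged; `TableTransformerConfig` is the public name, while this
# file defines it under an obfuscated alias):
#   config = TableTransformerConfig(use_timm_backbone=False)  # falls back to a ResNet "stage4" backbone
#   assert config.num_attention_heads == config.encoder_attention_heads  # via attribute_map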
| 333
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = num_labels
__UpperCAmelCase = image_size
__UpperCAmelCase = layer_depths
__UpperCAmelCase = embed_dims
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self ) -> Optional[int]:
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
a__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = SwiftFormerModelTester(self )
__UpperCAmelCase = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
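        # Concrete instance of the shapes checked above (hedged, with the tester
        # defaults image_size=224 and embed_dims=[48, 56, 112, 220]): the 8 hidden
        # states are (batch, 48, 56, 56) x2, (batch, 56, 28, 28) x2,
        # (batch, 112, 14, 14) x2 and (batch, 220, 7, 7) x2.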
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
        # check that output_hidden_states also works when using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
            setattr(configs_no_init, key, 1e-10)
        if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
            no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
            setattr(configs_no_init, key, no_init_subconfig)
    return configs_no_init
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config)
for model_class in self.all_model_classes:
    model = model_class(config=configs_no_init)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def prepare_img() -> Any:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
    outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
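# Editor's note: a minimal inference sketch (not part of the original test file) showing how the
# MBZUAI/swiftformer-xs checkpoint exercised by the integration test above can be used directly.
# The checkpoint name comes from the test; the image URL is an illustrative COCO sample.
from PIL import Image
import requests
import torch
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # predicted ImageNet class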
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype), False),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}},
        opset=opset, )
    del vae_decoder
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
args = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
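# Editor's note: an illustrative invocation of the conversion script above, plus a quick
# onnxruntime sanity check. The script filename and output directory are examples; the tensor
# names follow the onnx_export call above ("latent_sample" -> "sample"), and 4 latent channels is
# the usual value of vae_decoder.config.latent_channels for Stable Diffusion VAEs (an assumption).
#
#   python convert_vae_decoder_to_onnx.py --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("./sd_onnx/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = session.run(["sample"], {"latent_sample": latent})
print(sample.shape)  # (1, 3, 200, 200): the decoder upsamples the latent by 8x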
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class snake_case__ ( TestCase ):
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import faiss
dset: Dataset = self._create_dummy_dataset()
dset = dset.map(
    lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
)
dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
import faiss
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
import faiss
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
dset: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
dset: Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
    "elasticsearch.client.IndicesClient.create"
) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
    mocked_index_create.return_value = {"acknowledged": True}
    mocked_bulk.return_value([(True, None)] * 30)
    mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
    es_client = Elasticsearch()
    dset.add_elasticsearch_index("filename", es_client=es_client)
    scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class snake_case__ ( TestCase ):
def A_ ( self : str ) -> int:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5), dtype=np.float32))
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
self.assertGreater(scores[0], 0)
self.assertEqual(indices[0], 1)
# batched queries
queries = np.eye(5, dtype=np.float32)[::-1]
total_scores, total_indices = index.search_batch(queries)
self.assertRaises(ValueError, index.search_batch, queries[0])
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def A_ ( self : int ) -> int:
'''simple docstring'''
import faiss
index = FaissIndex(string_factory="Flat")
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
index = FaissIndex(string_factory="LSH")
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
with self.assertRaises(ValueError):
    _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
def A_ ( self : str ) -> Dict:
'''simple docstring'''
import faiss
custom_index = faiss.IndexFlat(5)
index = FaissIndex(custom_index=custom_index)
index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
    index.save(tmp_file.name)
    index = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ) -> None:
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
index_name = "index.faiss"
path = f"mock://{index_name}"
index.save(path, storage_options=mockfs.storage_options)
index = FaissIndex.load(path, storage_options=mockfs.storage_options)
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class snake_case__ ( TestCase ):
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {"acknowledged": True}
index = ElasticSearchIndex(es_client=es_client)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(["foo", "bar", "foobar"])
# single query
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# single query with timeout
query = "foo"
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
scores, indices = index.search(query, request_timeout=30)
self.assertEqual(scores[0], 1)
self.assertEqual(indices[0], 0)
# batched queries
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries)
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([1, 1, 1], best_indices)
# batched queries with timeout
queries = ["foo", "bar", "foobar"]
mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
total_scores, total_indices = index.search_batch(queries, request_timeout=30)
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores), 0)
self.assertListEqual([1, 1, 1], best_indices)
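# Editor's note: a compact, self-contained sketch of the FaissIndex API exercised by the tests
# above (add_vectors -> search / search_batch -> save / load). Requires `faiss-cpu` and `datasets`.
import numpy as np
import faiss
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five orthonormal vectors
scores, ids = index.search(np.ones(5, dtype=np.float32))  # nearest neighbours of one query
total_scores, total_ids = index.search_batch(np.eye(5, dtype=np.float32))  # batched queries
index.save("my.faiss")
index = FaissIndex.load("my.faiss")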
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
"""simple docstring"""
dataset_name: Optional[str] = field(
    default=None, metadata={
        "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
    }, )
dataset_config_name: Optional[str] = field(
    default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
train_val_split: Optional[float] = field(
    default=0.15, metadata={"help": "Percent to split off of train for validation."} )
max_train_samples: Optional[int] = field(
    default=None, metadata={
        "help": (
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        )
    }, )
max_eval_samples: Optional[int] = field(
    default=None, metadata={
        "help": (
            "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        )
    }, )
def __post_init__(self):
    if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
        raise ValueError(
            "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
"""simple docstring"""
model_name_or_path: str = field(
    default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, )
model_type: Optional[str] = field(
    default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
config_name: Optional[str] = field(
    default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
cache_dir: Optional[str] = field(
    default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
model_revision: str = field(
    default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
use_auth_token: bool = field(
    default=False, metadata={
        "help": (
            "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
            "with private models)."
        )
    }, )
ignore_mismatched_sizes: bool = field(
    default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def UpperCAmelCase__ ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
    dataset = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
else:
    data_files = {}
    if data_args.train_dir is not None:
        data_files["train"] = os.path.join(data_args.train_dir, "**")
    if data_args.validation_dir is not None:
        data_files["validation"] = os.path.join(data_args.validation_dir, "**")
    dataset = load_dataset(
        "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )
# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
    split = dataset["train"].train_test_split(data_args.train_val_split)
    dataset["train"] = split["train"]
    dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
labels = dataset["train"].features["labels"].names
label2id, id2label = {}, {}
for i, label in enumerate(labels):
    label2id[label] = str(i)
    id2label[str(i)] = label
# Load the accuracy metric from the datasets package
metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p):
    return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
config = AutoConfig.from_pretrained(
    model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
model = AutoModelForImageClassification.from_pretrained(
    model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
image_processor = AutoImageProcessor.from_pretrained(
    model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowerCamelCase : Tuple = image_processor.size['shortest_edge']
else:
__lowerCamelCase : Dict = (image_processor.size['height'], image_processor.size['width'])
__lowerCamelCase : Dict = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowerCamelCase : Dict = Compose(
[
RandomResizedCrop(UpperCAmelCase_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowerCamelCase : Union[str, Any] = Compose(
[
Resize(UpperCAmelCase_ ),
CenterCrop(UpperCAmelCase_ ),
ToTensor(),
normalize,
] )
def train_transforms(UpperCAmelCase_ : List[Any] ):
__lowerCamelCase : Any = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(UpperCAmelCase_ : int ):
__lowerCamelCase : Optional[int] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
dataset["train"] = (
    dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
)
# Set the training transforms
dataset["train"].set_transform(UpperCAmelCase_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
dataset["validation"] = (
    dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
dataset["validation"].set_transform(UpperCAmelCase_ )
# Initalize our trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
    checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
    checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
    "finetuned_from": model_args.model_name_or_path,
    "tasks": "image-classification",
    "dataset": data_args.dataset_name,
    "tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
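# Editor's note: an illustrative invocation of the fine-tuning script above; the dataset name,
# output directory and hyperparameters are examples, not values mandated by the script:
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --do_train --do_eval \
#       --learning_rate 2e-5 \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 8 \
#       --seed 1337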
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
A__ : str = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
A__ : Dict = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
A__ : int = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
A__ : Tuple = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
A__ : Tuple = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns) -> List[str]:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
args = parser.parse_args()
config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
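# Editor's note: an illustrative invocation of the conversion script above; the script filename
# and checkpoint paths are examples:
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus-large-arxiv/model.ckpt-300000 \
#       --save_dir ./bigbird-pegasus-large-arxiv-pt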
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : str = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "perceiver"
def __init__( self, num_latents=256, d_latents=1280, d_model=768, num_blocks=1, num_self_attends_per_block=26, num_self_attention_heads=8, num_cross_attention_heads=8, qk_channels=None, v_channels=None, cross_attention_shape_for_attention="kv", self_attention_widening_factor=1, cross_attention_widening_factor=1, hidden_act="gelu", attention_probs_dropout_prob=0.1, initializer_range=0.02, layer_norm_eps=1e-12, use_query_residual=True, vocab_size=262, max_position_embeddings=2048, image_size=56, train_size=[368, 496], num_frames=16, audio_samples_per_frame=1920, samples_per_patch=16, output_shape=[1, 16, 224, 224], **kwargs, ) -> Dict:
    """simple docstring"""
    super().__init__(**kwargs)
    self.num_latents = num_latents
    self.d_latents = d_latents
    self.d_model = d_model
    self.num_blocks = num_blocks
    self.num_self_attends_per_block = num_self_attends_per_block
    self.num_self_attention_heads = num_self_attention_heads
    self.num_cross_attention_heads = num_cross_attention_heads
    self.qk_channels = qk_channels
    self.v_channels = v_channels
    self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
    self.self_attention_widening_factor = self_attention_widening_factor
    self.cross_attention_widening_factor = cross_attention_widening_factor
    self.hidden_act = hidden_act
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.use_query_residual = use_query_residual
    # masked language modeling attributes
    self.vocab_size = vocab_size
    self.max_position_embeddings = max_position_embeddings
    # image classification attributes
    self.image_size = image_size
    # flow attributes
    self.train_size = train_size
    # multimodal autoencoding attributes
    self.num_frames = num_frames
    self.audio_samples_per_frame = audio_samples_per_frame
    self.samples_per_patch = samples_per_patch
    self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
"""simple docstring"""
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
    """simple docstring"""
    if self.task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def atol_for_validation(self) -> float:
"""simple docstring"""
return 1E-4
def generate_dummy_inputs(self, preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, ) -> Mapping[str, Any]:
    """simple docstring"""
    # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
    if isinstance(preprocessor, PreTrainedTokenizerBase):
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join(["a"]) * seq_length] * batch_size
        inputs = dict(preprocessor(dummy_input, return_tensors=framework))
        inputs["inputs"] = inputs.pop("input_ids")
        return inputs
    elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
        inputs["inputs"] = inputs.pop("pixel_values")
        return inputs
    else:
        raise ValueError(
            "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCamelCase :
"""simple docstring"""
def __init__(self, short_edge_length, max_size=sys.maxsize) -> Optional[int]:
    """simple docstring"""
    self.interp_method = "bilinear"
    self.max_size = max_size
    self.short_edge_length = short_edge_length
def __call__(self, imgs) -> str:
    """simple docstring"""
    img_augs = []
    for img in imgs:
        h, w = img.shape[:2]
        # later: provide list and randomly choose index for resize
        size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
        if size == 0:
            return img
        scale = size * 1.0 / min(h, w)
        if h < w:
            newh, neww = size, scale * w
        else:
            newh, neww = scale * h, size
        if max(newh, neww) > self.max_size:
            scale = self.max_size * 1.0 / max(newh, neww)
            newh = newh * scale
            neww = neww * scale
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        if img.dtype == np.uint8:
            pil_image = Image.fromarray(img)
            pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
            img = np.asarray(pil_image)
        else:
            img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
            img = nn.functional.interpolate(
                img, (newh, neww), mode=self.interp_method, align_corners=False ).squeeze(0)
        img_augs.append(img)
    return img_augs
class lowerCamelCase :
"""simple docstring"""
def __init__(self, cfg) -> Optional[int]:
    """simple docstring"""
    self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
    self.input_format = cfg.INPUT.FORMAT
    self.size_divisibility = cfg.SIZE_DIVISIBILITY
    self.pad_value = cfg.PAD_VALUE
    self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
    self.device = cfg.MODEL.DEVICE
    self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
    self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
    self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
def pad(self, images) -> Union[str, Any]:
    """simple docstring"""
    max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
    image_sizes = [im.shape[-2:] for im in images]
    images = [
        nn.functional.pad(
            im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
        for size, im in zip(image_sizes, images)
    ]
    return torch.stack(images), torch.tensor(image_sizes)
def __call__(self, images, single_image=False) -> Optional[Any]:
    """simple docstring"""
    with torch.no_grad():
        if not isinstance(images, list):
            images = [images]
        if single_image:
            assert len(images) == 1
        for i in range(len(images)):
            if isinstance(images[i], torch.Tensor):
                images.insert(i, images.pop(i).to(self.device).float())
            elif not isinstance(images[i], torch.Tensor):
                images.insert(
                    i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                    .to(self.device)
                    .float(), )
        # resize smallest edge
        raw_sizes = torch.tensor([im.shape[:2] for im in images])
        images = self.aug(images)
        # transpose images and convert to torch tensors
        # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
        # now normalize before pad to avoid useless arithmetic
        images = [self.normalizer(x) for x in images]
        # now pad them to do the following operations
        images, sizes = self.pad(images)
        # Normalize
        if self.size_divisibility > 0:
            raise NotImplementedError()
        # pad
        scales_yx = torch.true_divide(raw_sizes, sizes)
        if single_image:
            return images[0], sizes[0], scales_yx[0]
        else:
            return images, sizes, scales_yx
def _scale_box(boxes, scale_yx) -> Union[str, Any]:
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]) -> List[Any]:
    '''simple docstring'''
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
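# Editor's note: a standalone sketch of the shortest-edge resize arithmetic used by
# ResizeShortestEdge above (pure math, no torch), to make the scaling rule explicit.
def shortest_edge_resize_dims(h: int, w: int, size: int, max_size: int) -> tuple:
    # scale so the shorter side becomes `size`, then cap the longer side at `max_size`
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

assert shortest_edge_resize_dims(480, 640, 800, 1333) == (800, 1067)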
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
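# Editor's note: the _LazyModule indirection above defers importing the torch-heavy modeling code
# until an attribute is first accessed, e.g. `from transformers.models.mega import MegaModel`
# only triggers the import of modeling_mega at that point.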
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
'''simple docstring'''
def __init__(self, *args, **kwargs) -> int:
    '''simple docstring'''
    super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _sanitize_parameters(self, **kwargs) -> Optional[Any]:
    '''simple docstring'''
    postprocess_kwargs = {}
    if "threshold" in kwargs:
        postprocess_kwargs["threshold"] = kwargs["threshold"]
    return {}, {}, postprocess_kwargs
def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
    '''simple docstring'''
    return super().__call__(*args, **kwargs)
def preprocess(self, image) -> Union[str, Any]:
    '''simple docstring'''
    image = load_image(image)
    target_size = torch.IntTensor([[image.height, image.width]])
    inputs = self.image_processor(images=[image], return_tensors="pt")
    if self.tokenizer is not None:
        inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
    inputs["target_size"] = target_size
    return inputs
def _forward(self, model_inputs) -> List[Any]:
    '''simple docstring'''
    target_size = model_inputs.pop("target_size")
    outputs = self.model(**model_inputs)
    model_outputs = outputs.__class__({"target_size": target_size, **outputs})
    if self.tokenizer is not None:
        model_outputs["bbox"] = model_inputs["bbox"]
    return model_outputs
def postprocess(self, model_outputs, threshold=0.9) -> Union[str, Any]:
    '''simple docstring'''
    target_size = model_outputs["target_size"]
    if self.tokenizer is not None:
        # This is a LayoutLMForTokenClassification variant.
        # The OCR got the boxes and the model classified the words.
        height, width = target_size[0].tolist()
        def unnormalize(bbox):
            return self._get_bounding_box(
                torch.Tensor(
                    [
                        (width * bbox[0] / 1000),
                        (height * bbox[1] / 1000),
                        (width * bbox[2] / 1000),
                        (height * bbox[3] / 1000),
                    ] ) )
        scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
        labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
        boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
        keys = ["score", "label", "box"]
        annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
    else:
        # This is a regular ForObjectDetectionModel
        raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
        raw_annotation = raw_annotations[0]
        scores = raw_annotation["scores"]
        labels = raw_annotation["labels"]
        boxes = raw_annotation["boxes"]
        raw_annotation["scores"] = scores.tolist()
        raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
        raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
        # {"scores": [...], ...} --> [{"score":x, ...}, ...]
        keys = ["score", "label", "box"]
        annotation = [
            dict(zip(keys, vals))
            for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
        ]
    return annotation
def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
    '''simple docstring'''
    if self.framework != "pt":
        raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
    xmin, ymin, xmax, ymax = box.int().tolist()
    bbox = {
        "xmin": xmin,
        "ymin": ymin,
        "xmax": xmax,
        "ymax": ymax,
    }
return bbox
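# Editor's note: a minimal usage sketch for the pipeline above; "facebook/detr-resnet-50" is a
# common object-detection checkpoint, used here purely as an example.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# each prediction is shaped by _get_bounding_box above, e.g.:
# {"score": 0.99, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}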
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__( self , vocab_size=50432 , hidden_size=6144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
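# Editor's note: a short sketch (not from the original file) of the rope_scaling contract enforced
# by _rope_scaling_validation above: a two-field dict with "type" in {"linear", "dynamic"} and a
# float "factor" greater than 1.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
# GPTNeoXConfig(rope_scaling={"type": "exotic", "factor": 2.0})  # would raise ValueError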
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # an empty vector should also be constructible

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component_vector(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))  # the stray 0.01 in the original was a misplaced delta

    def test_add_matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
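# --- Added sketch (not in the original tests): using the library directly ---
# Assumes the same local `lib` module imported at the top of this test file.
#
#   from lib import Matrix, Vector, axpy
#   v = Vector([1, 2, 3])
#   print(axpy(2, v, Vector([1, 0, 1])))          # (3,4,7), cf. test_axpy above
#   m = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
#   print(m.determinant())                        # -5, cf. test_determinant above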
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Check that all the custom files are present in the given transformers directory."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
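# --- Added sketch (not in the original file): instantiating the config ---
# Thanks to `attribute_map`, generic code can read `hidden_size` even though
# the stored field is `n_embd`:
#
#   config = CTRLConfig(n_embd=256, n_layer=2, n_head=4)  # a small test-size model
#   assert config.hidden_size == 256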
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
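# --- Added sketch (not in the original file) ---
# `attribute_map` aliases `hidden_size` to `d_model` here as well:
#
#   config = Speech2Text2Config(d_model=512, decoder_layers=2)
#   assert config.hidden_size == 512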
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Return a new string whose characters alternate between the two inputs.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
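# Added example (not in the original): with inputs of unequal length the
# leftover characters of the longer string are appended at the end.
# alternative_string_arrange("ABCD", "XY") -> "AXBYCD"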
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER metrics and write them (and optionally all outputs) to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize transcripts the same way the training targets were normalized."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
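# --- Added note (not in the original script): example command line ---
# (file name and dataset/model ids below are illustrative)
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs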
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """
    Try every shift of a Caesar cipher and return the one whose letter
    distribution best matches English, as (shift, chi_squared_value, plaintext).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
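# --- Added demo (not in the original file) ---
# Round-trip check using the function above; the expected values come from the
# function's standard doctest in the upstream TheAlgorithms repository.
if __name__ == "__main__":
    shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(shift, round(chi_squared, 3), plaintext)  # 10 233.353 short string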
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
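# --- Added sketch (not in the original module): reading through a filesystem ---
# A hedged usage example; "data.txt.gz" is a hypothetical local file.
#
#   fs = GzipFileSystem(fo="data.txt.gz")
#   print(fs.cat("data.txt"))  # decompressed bytes of the single inner file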
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    """We will verify the conversion on a standard COCO image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1_000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 256
elif size == "b2":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 4, 6, 3]
elif size == "b3":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 4, 18, 3]
elif size == "b4":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 8, 27, 3]
elif size == "b5":
_UpperCAmelCase = [64, 128, 320, 512]
_UpperCAmelCase = 768
_UpperCAmelCase = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
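# --- Added note (not in the original script): example invocation ---
# (file name and checkpoint path are illustrative)
#
#   python convert_segformer_checkpoint.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0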
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, "TrieNode"] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words):
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word):
        """Insert a word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word):
        """Return True if the exact word is in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word):
        """Delete a word from the Trie, pruning nodes that become empty."""

        def _delete(curr, word, index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node, word) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg, passes) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
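# --- Added demo (not in the original file): insert/find/delete round trip ---
def demo_roundtrip() -> None:
    root = TrieNode()
    root.insert_many(["car", "card", "care"])
    assert root.find("card")
    root.delete("card")
    # deleting "card" must not remove other words sharing the same prefix
    assert not root.find("card")
    assert root.find("care")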
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak the fairseq model's weights into our BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
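# --- Added note (not in the original script): example invocation ---
# (file name and output folder are illustrative)
#
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn \
#       --hf_config facebook/bart-large-cnn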
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
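# --- Added sketch (not in the original file): dumping the full dp matrix ---
def print_all_pairs(g: Graph) -> None:
    # After floyd_warshall(), dp[u][v] holds the shortest distance from u to v.
    for u in range(g.n):
        print([g.show_min(u, v) for v in range(g.n)])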
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
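# --- Added note (not in the original file): round-trip behaviour ---
# decimal_to_bits quantizes to 256 levels, so bits_to_decimal inverts it up to
# that quantization:
#
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int().float() / 255)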
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def SCREAMING_SNAKE_CASE__ ( self , __A , __A , __A , __A="epsilon" , __A=None , __A = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_snake_case , _snake_case = torch.split(__A , sample.shape[1] , dim=1 )
else:
_snake_case = None
# 1. compute alphas, betas
_snake_case = self.alphas_cumprod[t]
_snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
_snake_case = 1 - alpha_prod_t
_snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(__A , -scale , __A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_snake_case = 0
if t > 0:
_snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device )
_snake_case = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise
_snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1.0 , ):
"""simple docstring"""
super().__init__()
_snake_case = bit_scale
_snake_case = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 2_56 , lowerCAmelCase_ = 2_56 , lowerCAmelCase_ = 50 , lowerCAmelCase_ = None , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "pil" , lowerCAmelCase_ = True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_snake_case = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_snake_case = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_snake_case = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_snake_case = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_snake_case = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_snake_case = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
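# Illustrative usage sketch (hedged assumptions: in the upstream diffusers
# community example this pipeline class is named BitDiffusion, and the UNet
# is assumed to have been trained to denoise bit-encoded tensors):
# from diffusers import DDIMScheduler
# pipe = BitDiffusion(unet=trained_unet, scheduler=DDIMScheduler(clip_sample=False), bit_scale=1.0)
# image = pipe(height=256, width=256, num_inference_steps=50).images[0]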
| 42
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
a = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(__UpperCAmelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
a = dset.map(
lambda __UpperCAmelCase , __UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase )
a = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
a , a = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
a = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
a = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=__UpperCAmelCase )
a , a = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
a = np.eye(5 , dtype=np.floataa )[::-1]
a , a = index.search_batch(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
import faiss
a = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
a = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
a = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = faiss.IndexFlat(5 )
a = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
a = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _a ( a :Dict ) -> Any:
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
a = '''index.faiss'''
a = F"""mock://{index_name}"""
index.save(a , storage_options=mockfs.storage_options )
a = FaissIndex.load(a , storage_options=mockfs.storage_options )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(a )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = Elasticsearch()
a = {'''acknowledged''': True}
a = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
# batched queries with timeout
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase , request_timeout=30 )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
| 0
| 0
|
'''simple docstring'''
g = 9.8_06_65
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
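# Worked example (illustrative): a fully submerged 0.5 m^3 object in water
# (fluid_density ~ 997 kg/m^3) experiences
# 997 * 9.80665 * 0.5 ≈ 4888.62 N of buoyant force:
# archimedes_principle(fluid_density=997, volume=0.5)  # -> 4888.615025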
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 351
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A : Tuple = logging.get_logger(__name__)
A : Tuple = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = "longformer"
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[List[int], int] = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 30522 , SCREAMING_SNAKE_CASE : int = 768 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 12 , SCREAMING_SNAKE_CASE : int = 3072 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : float = 1e-12 , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : List[Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[Any] = attention_window
_A : int = sep_token_id
_A : Tuple = bos_token_id
_A : Any = eos_token_id
_A : List[str] = vocab_size
_A : Any = hidden_size
_A : Optional[int] = num_hidden_layers
_A : int = num_attention_heads
_A : Dict = hidden_act
_A : List[Any] = intermediate_size
_A : int = hidden_dropout_prob
_A : Optional[int] = attention_probs_dropout_prob
_A : int = max_position_embeddings
_A : Any = type_vocab_size
_A : Dict = initializer_range
_A : Any = layer_norm_eps
_A : List[Any] = onnx_export
class __lowerCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : "PretrainedConfig" , SCREAMING_SNAKE_CASE : str = "default" , SCREAMING_SNAKE_CASE : "List[PatchingSpec]" = None):
super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_A : Optional[Any] = True
@property
def A ( self : List[str]):
if self.task == "multiple-choice":
_A : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
])
@property
def A ( self : str):
_A : int = super().outputs
if self.task == "default":
_A : str = {0: 'batch'}
return outputs
@property
def A ( self : List[Any]):
return 1e-4
@property
def A ( self : Dict):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def A ( self : str , SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ):
_A : Union[str, Any] = super().generate_dummy_inputs(
preprocessor=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_A : Tuple = torch.zeros_like(inputs['input_ids'])
# make every second token global
_A : Dict = 1
return inputs
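# Note: ONNX only gained the Trilu (tril/triu) operator in opset 14, which is
# why default_onnx_opset is raised with max(..., 14) above; the dummy
# global_attention_mask marks every second token as globally attending
# (pattern 1, 0, 1, 0, ...), matching Longformer's convention that 1 means
# global attention.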
| 227
| 0
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
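# For this classic CLRS flow network the call above reports the saturated
# cut edges [(1, 3), (4, 3), (4, 5)]; their original capacities
# 12 + 7 + 4 = 23 equal the maximum 0 -> 5 flow, as the max-flow min-cut
# theorem requires. Note that mincut mutates the graph it is given, so run
# it on a fresh copy ([row[:] for row in test_graph]) if you need it twice.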
| 61
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 231
| 0
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """simple docstring"""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
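# Illustrative values: mobius(4) == 0 (4 = 2**2 is not square-free),
# mobius(6) == 1 (two distinct prime factors, even count),
# mobius(7) == -1 (one prime factor, odd count).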
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = LongformerTokenizer
lowercase__ = True
lowercase__ = LongformerTokenizerFast
lowercase__ = True
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCamelCase ={'''unk_token''': '''<unk>'''}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] , **UpperCamelCase__ : str ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase ='''lower newer'''
__UpperCamelCase ='''lower newer'''
return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='''lower newer'''
__UpperCamelCase =['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCamelCase =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase ='''Encode this sequence.'''
__UpperCamelCase =tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
__UpperCamelCase ='''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
__UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__UpperCamelCase ='''Encode <mask> sequence'''
__UpperCamelCase ='''Encode <mask>sequence'''
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase ='''A, <mask> AllenNLP sentence.'''
__UpperCamelCase =tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
__UpperCamelCase =tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase =f"""{text_of_1_token} {text_of_1_token}"""
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
| 85
| 1
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( enum.Enum ):
UpperCAmelCase_ :List[Any] = 0
UpperCAmelCase_ :int = 1
@add_end_docstrings(A__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = "generated"
def __init__( self , *__A , **__A ) -> int:
super().__init__(*__A , **__A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , **__A , ) -> str:
lowerCAmelCase_ :List[str] = {}
if truncation is not None:
lowerCAmelCase_ :List[Any] = truncation
lowerCAmelCase_ :Optional[int] = generate_kwargs
lowerCAmelCase_ :Optional[int] = {}
if return_tensors is not None and return_type is None:
lowerCAmelCase_ :Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowerCAmelCase_ :Any = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase_ :str = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase_ :int = self.tokenizer.encode(__A , add_special_tokens=__A )
if len(__A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCAmelCase_ :Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
return True
def __lowerCAmelCase ( self , *__A , __A ) -> List[Any]:
lowerCAmelCase_ :List[str] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
lowerCAmelCase_ :Dict = ([prefix + arg for arg in args[0]],)
lowerCAmelCase_ :Optional[int] = True
elif isinstance(args[0] , __A ):
lowerCAmelCase_ :Any = (prefix + args[0],)
lowerCAmelCase_ :Optional[Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowerCAmelCase_ :List[str] = self.tokenizer(*__A , padding=__A , truncation=__A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *__A , **__A ) -> Optional[int]:
lowerCAmelCase_ :Any = super().__call__(*__A , **__A )
if (
isinstance(args[0] , __A )
and all(isinstance(__A , __A ) for el in args[0] )
and all(len(__A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowerCAmelCase ( self , __A , __A=TruncationStrategy.DO_NOT_TRUNCATE , **__A ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self._parse_and_tokenize(__A , truncation=__A , **__A )
return inputs
def __lowerCAmelCase ( self , __A , **__A ) -> str:
if self.framework == "pt":
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = tf.shape(model_inputs["""input_ids"""] ).numpy()
lowerCAmelCase_ :Optional[Any] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
lowerCAmelCase_ :Union[str, Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
lowerCAmelCase_ :Optional[Any] = self.model.generate(**__A , **__A )
lowerCAmelCase_ :Optional[Any] = output_ids.shape[0]
if self.framework == "pt":
lowerCAmelCase_ :str = output_ids.reshape(__A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowerCAmelCase_ :Tuple = tf.reshape(__A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowerCAmelCase ( self , __A , __A=ReturnType.TEXT , __A=False ) -> List[str]:
lowerCAmelCase_ :int = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowerCAmelCase_ :Optional[Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowerCAmelCase_ :str = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A , )
}
records.append(__A )
return records
@add_end_docstrings(A__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Any = "summary"
def __call__( self , *__A , **__A ) -> Tuple:
return super().__call__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(A__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[str] = "translation"
def __lowerCAmelCase ( self , __A , __A , __A ) -> Dict:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def __lowerCAmelCase ( self , *__A , __A=TruncationStrategy.DO_NOT_TRUNCATE , __A=None , __A=None ) -> Union[str, Any]:
if getattr(self.tokenizer , """_build_translation_inputs""" , __A ):
return self.tokenizer._build_translation_inputs(
*__A , return_tensors=self.framework , truncation=__A , src_lang=__A , tgt_lang=__A )
else:
return super()._parse_and_tokenize(*__A , truncation=__A )
def __lowerCAmelCase ( self , __A=None , __A=None , **__A ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = super()._sanitize_parameters(**__A )
if src_lang is not None:
lowerCAmelCase_ :List[str] = src_lang
if tgt_lang is not None:
lowerCAmelCase_ :Tuple = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowerCAmelCase_ :Dict = kwargs.get("""task""" , self.task )
lowerCAmelCase_ :Any = task.split("""_""" )
if task and len(__A ) == 4:
# translation, XX, to YY
lowerCAmelCase_ :Optional[int] = items[1]
lowerCAmelCase_ :Optional[Any] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *__A , **__A ) -> str:
return super().__call__(*__A , **__A )
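# Illustrative usage sketch (these classes back the stock transformers
# pipelines; the checkpoints below are common public examples, not
# requirements of the code above):
# from transformers import pipeline
# summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
# summarizer("A long article ...", max_length=60, min_length=10)
# translator = pipeline("translation_en_to_fr", model="t5-small")
# translator("How old are you?")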
| 84
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def A ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False ):
"""simple docstring"""
if rouge_types is None:
UpperCamelCase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
UpperCamelCase = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase__ , use_stemmer=UpperCamelCase__ )
if use_aggregator:
UpperCamelCase = scoring.BootstrapAggregator()
else:
UpperCamelCase = []
for ref, pred in zip(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = scorer.score(UpperCamelCase__ , UpperCamelCase__ )
if use_aggregator:
aggregator.add_scores(UpperCamelCase__ )
else:
scores.append(UpperCamelCase__ )
if use_aggregator:
UpperCamelCase = aggregator.aggregate()
else:
UpperCamelCase = {}
for key in scores[0]:
UpperCamelCase = [score[key] for score in scores]
return result
| 28
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( lowerCamelCase__ ):
__snake_case : Dict = 'encoder-decoder'
__snake_case : int = True
def __init__( self : Dict , **UpperCAmelCase : Optional[int] ):
super().__init__(**UpperCAmelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
lowerCAmelCase_ : Tuple = kwargs.pop("""encoder""" )
lowerCAmelCase_ : List[Any] = encoder_config.pop("""model_type""" )
lowerCAmelCase_ : Optional[int] = kwargs.pop("""decoder""" )
lowerCAmelCase_ : Union[str, Any] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
lowerCAmelCase_ : List[Any] = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : List[str] = AutoConfig.for_model(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : List[str] = True
@classmethod
def A ( cls : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ):
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase )
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : List[Any] = self.encoder.to_dict()
lowerCAmelCase_ : Any = self.decoder.to_dict()
lowerCAmelCase_ : str = self.__class__.model_type
return output
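# Illustrative usage sketch (assuming this class mirrors
# transformers.EncoderDecoderConfig):
# from transformers import BertConfig, EncoderDecoderConfig
# cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention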
| 356
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : str=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( lowercase__ : int , lowercase__ : Dict , lowercase__ : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase_ : int = """"""
else:
lowerCAmelCase_ : Union[str, Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : str = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase_ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase_ : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = dct.pop(lowercase__ )
lowerCAmelCase_ : List[Any] = val
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any=True ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowerCAmelCase_ : Dict = 8
# set labels if required
if not base_model:
lowerCAmelCase_ : str = 1000
lowerCAmelCase_ : List[Any] = """huggingface/label-files"""
lowerCAmelCase_ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : List[str] = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Any = idalabel
lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowerCAmelCase_ : Union[str, Any] = 384
lowerCAmelCase_ : Any = 1536
lowerCAmelCase_ : Union[str, Any] = 12
lowerCAmelCase_ : str = 6
# load original model from torch hub
lowerCAmelCase_ : Any = torch.hub.load("""facebookresearch/dino:main""" , lowercase__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase_ : Any = original_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
lowerCAmelCase_ : Dict = create_rename_keys(lowercase__ , base_model=lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
if base_model:
lowerCAmelCase_ : int = ViTModel(lowercase__ , add_pooling_layer=lowercase__ ).eval()
else:
lowerCAmelCase_ : Union[str, Any] = ViTForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by ViTImageProcessor
lowerCAmelCase_ : List[str] = ViTImageProcessor()
lowerCAmelCase_ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : List[str] = encoding["""pixel_values"""]
lowerCAmelCase_ : Optional[int] = model(lowercase__ )
if base_model:
lowerCAmelCase_ : Union[str, Any] = original_model(lowercase__ )
assert torch.allclose(lowercase__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
lowerCAmelCase_ : int = original_model(lowercase__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)

    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
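# Example invocation (the script file name and output path are assumptions, not from the source):
#   python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16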
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
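# Minimal usage sketch (not part of the original module): `attribute_map` and the
# properties above let generic code read the shared names from a PEGASUS config.
#   config = PegasusConfig(d_model=512, encoder_attention_heads=8)
#   assert config.hidden_size == 512 and config.num_attention_heads == 8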
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model, model_args: tuple, output_path: Path, ordered_input_names, output_names,
    dynamic_axes, opset, use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names,
            output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True,
            use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names,
            output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
# Lint as: python3
"""Utilities for file names."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
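# Quick sanity checks of the helpers above (expected values, derived from the regexes):
#   camelcase_to_snakecase("SomeDatasetName")    -> "some_dataset_name"
#   snakecase_to_camelcase("some_dataset_name")  -> "SomeDatasetName"
#   filename_prefix_for_split("mnist", "train")  -> "mnist-train"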
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss with respect to each set of weights, computed
        # via the chain rule, then applied as a weight update.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, where `value` is already a sigmoid output."""
    return (value) * (1 - (value))
def example() -> int:
    """
    Example showing how to use the neural network class and its
    methods: builds the network on fixed input/output values,
    trains it, and returns a prediction.
    """
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
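# Note: with random weight initialisation and only 10 training iterations, the
# parity prediction above is unlikely to have converged; this 3-bit parity task
# typically needs several thousand iterations for the loss to drop meaningfully.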