code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Tuple = (EulerDiscreteScheduler,)
UpperCAmelCase : Tuple = 1_0
def __snake_case ( self : str , **__UpperCAmelCase : Union[str, Any]):
a : Any = {
"num_train_timesteps": 1100,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__UpperCAmelCase)
return config
def __snake_case ( self : Optional[int]):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase)
def __snake_case ( self : int):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02]):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase)
def __snake_case ( self : List[Any]):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase)
def __snake_case ( self : List[str]):
a : Optional[Any] = self.scheduler_classes[0]
a : str = self.get_scheduler_config()
a : Optional[int] = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps)
a : Dict = torch.manual_seed(0)
a : Union[str, Any] = self.dummy_model()
a : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
a : Optional[int] = sample.to(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
a : List[str] = model(__UpperCAmelCase , __UpperCAmelCase)
a : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase)
a : Any = output.prev_sample
a : str = torch.sum(torch.abs(__UpperCAmelCase))
a : str = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_sum.item() - 10.0_807) < 1e-2
assert abs(result_mean.item() - 0.0_131) < 1e-3
def __snake_case ( self : Tuple):
a : Any = self.scheduler_classes[0]
a : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction")
a : List[Any] = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps)
a : Tuple = torch.manual_seed(0)
a : Dict = self.dummy_model()
a : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
a : str = sample.to(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a : Any = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase)
a : Any = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase)
a : List[str] = output.prev_sample
a : str = torch.sum(torch.abs(__UpperCAmelCase))
a : Optional[int] = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_sum.item() - 0.0_002) < 1e-2
assert abs(result_mean.item() - 2.2676e-06) < 1e-3
def __snake_case ( self : Optional[int]):
a : List[str] = self.scheduler_classes[0]
a : List[Any] = self.get_scheduler_config()
a : Dict = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase)
a : str = torch.manual_seed(0)
a : str = self.dummy_model()
a : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a : List[str] = sample.to(__UpperCAmelCase)
for t in scheduler.timesteps:
a : Any = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase)
a : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase)
a : Union[str, Any] = output.prev_sample
a : str = torch.sum(torch.abs(__UpperCAmelCase))
a : List[str] = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_sum.item() - 10.0_807) < 1e-2
assert abs(result_mean.item() - 0.0_131) < 1e-3
def __snake_case ( self : str):
a : Any = self.scheduler_classes[0]
a : List[Any] = self.get_scheduler_config()
a : Any = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase)
a : Any = torch.manual_seed(0)
a : str = self.dummy_model()
a : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
a : Union[str, Any] = sample.to(__UpperCAmelCase)
for t in scheduler.timesteps:
a : Tuple = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
a : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase)
a : List[str] = output.prev_sample
a : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase))
a : Tuple = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_sum.item() - 124.52_299_499_511_719) < 1e-2
assert abs(result_mean.item() - 0.16_213_932_633_399_963) < 1e-3
| 40
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase ( A_ )-> Dict:
'''simple docstring'''
a : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a : Union[str, Any] = 128
elif "12-12" in model_name:
a : List[Any] = 12
a : str = 12
elif "14-14" in model_name:
a : List[Any] = 14
a : Optional[int] = 14
elif "16-16" in model_name:
a : Any = 16
a : List[Any] = 16
else:
raise ValueError("Model not supported" )
a : Optional[int] = "huggingface/label-files"
if "speech-commands" in model_name:
a : Optional[int] = 35
a : List[str] = "speech-commands-v2-id2label.json"
else:
a : Optional[Any] = 527
a : Tuple = "audioset-id2label.json"
a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()}
a : Any = idalabel
a : str = {v: k for k, v in idalabel.items()}
return config
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
if "module.v" in name:
a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
a : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
a : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
a : str = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a : Tuple = name.replace("attn" , "attention.self" )
if "norm1" in name:
a : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def lowercase ( A_ , A_ )-> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
a : str = orig_state_dict.pop(A_ )
if "qkv" in key:
a : int = key.split("." )
a : Optional[int] = int(key_split[3] )
a : int = config.hidden_size
if "weight" in key:
a : List[str] = val[:dim, :]
a : Any = val[dim : dim * 2, :]
a : int = val[-dim:, :]
else:
a : Optional[Any] = val[:dim]
a : Union[str, Any] = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : str = val
return orig_state_dict
def lowercase ( A_ )-> Dict:
'''simple docstring'''
a : Union[str, Any] = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(A_ , A_ )
@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
'''simple docstring'''
a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
a : Dict = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
a : Any = model_name_to_url[model_name]
a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
# remove some keys
remove_keys(A_ )
# rename some keys
a : Union[str, Any] = convert_state_dict(A_ , A_ )
# load 🤗 model
a : List[str] = ASTForAudioClassification(A_ )
model.eval()
model.load_state_dict(A_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
a : str = 1_024 if "speech-commands" not in model_name else 128
a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
if "speech-commands" in model_name:
a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
a : int = dataset[0]["audio"]["array"]
else:
a : Tuple = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
a , a : Tuple = torchaudio.load(A_ )
a : Optional[Any] = waveform.squeeze().numpy()
a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
# forward pass
a : Optional[Any] = model(**A_ )
a : List[str] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A_ ).mkdir(exist_ok=A_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(A_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
| 1
|
'''simple docstring'''
import qiskit
def __UpperCamelCase ( UpperCAmelCase = 2 ):
lowercase__ : List[str] = qubits
# Using Aer's simulator
lowercase__ : Any = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
lowercase__ : List[str] = qiskit.QuantumCircuit(UpperCAmelCase , UpperCAmelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , UpperCAmelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , UpperCAmelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(UpperCAmelCase ) ) , list(range(UpperCAmelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
lowercase__ : Tuple = qiskit.execute(UpperCAmelCase , UpperCAmelCase , shots=1000 )
return job.result().get_counts(UpperCAmelCase )
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 214
|
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
lowercase__ : Tuple = sorted(string.lower() )
return len(UpperCAmelCase ) == len(set(UpperCAmelCase ) )
if __name__ == "__main__":
__a: Union[str, Any] = input("""Enter a string """).strip()
__a: Tuple = is_isogram(input_str)
print(F'{input_str} is {"an" if isogram else "not an"} isogram.')
| 214
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Any:
"""simple docstring"""
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Any ) -> str:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
__lowerCamelCase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase__ , id=UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
if exitstatus == 5:
__lowerCamelCase = 0
# Doctest custom flag to ignore output.
__A = doctest.register_optionflag("IGNORE_RESULT")
__A = doctest.OutputChecker
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__A = CustomOutputChecker
__A = HfDoctestModule
__A = HfDocTestParser
| 90
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
__lowerCamelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , x.transpose() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , transpose(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , transpose(lowerCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ) , np.asarray(transpose(lowerCamelCase__ ) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase__ , axes=(1, 2, 0) ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.reshape(lowerCamelCase__ , (4, 3) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , np.reshape(lowerCamelCase__ , (12, 5) ) ) )
@require_torch
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , reshape(lowerCamelCase__ , (12, 5) ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , reshape(lowerCamelCase__ , (4, 3) ).numpy() ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , reshape(lowerCamelCase__ , (12, 5) ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (4, 3) ) , np.asarray(reshape(lowerCamelCase__ , (4, 3) ) ) ) )
__lowerCamelCase = np.random.randn(3 , 4 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__ , (12, 5) ) , np.asarray(reshape(lowerCamelCase__ , (12, 5) ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.squeeze(lowerCamelCase__ ) ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.squeeze(lowerCamelCase__ , axis=2 ) ) )
@require_torch
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , squeeze(lowerCamelCase__ ).numpy() ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , squeeze(lowerCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(1 , 3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ) , np.asarray(squeeze(lowerCamelCase__ ) ) ) )
__lowerCamelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ , axis=2 ) , np.asarray(squeeze(lowerCamelCase__ , axis=2 ) ) ) )
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.expand_dims(lowerCamelCase__ , axis=1 ) ) )
@require_torch
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , expand_dims(lowerCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = np.random.randn(3 , 4 )
__lowerCamelCase = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__ , axis=1 ) , np.asarray(expand_dims(lowerCamelCase__ , axis=1 ) ) ) )
| 90
| 1
|
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def lowercase_ ( _lowerCamelCase : Optional[int]):
if hor == 128:
lowercase__ : Dict = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 128, 256)
lowercase__ : Dict = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
lowercase__ : Tuple = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 64, 128, 256)
lowercase__ : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
lowercase__ : Union[str, Any] = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
lowercase__ : Union[str, Any] = model.state_dict()
lowercase__ : List[Any] = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
lowercase__ : Tuple = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Dict = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''')
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( ):
lowercase__ : List[str] = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
lowercase__ : List[str] = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
lowercase__ : str = model
lowercase__ : Optional[int] = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : Optional[Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Any = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
with open("hub/hopper-medium-v2/value_function/config.json" , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 352
|
def lowercase_ ( _lowerCamelCase : list):
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
for j in range(_lowerCamelCase , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
for j in range(_lowerCamelCase):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A ( _UpperCAmelCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = 8
# DPR tok
A__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A__ = os.path.join(self.tmpdirname,'dpr_tokenizer' )
os.makedirs(lowercase_,exist_ok=lowercase_ )
A__ = os.path.join(lowercase_,DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
A__ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
A__ = dict(zip(lowercase_,range(len(lowercase_ ) ) ) )
A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
A__ = {'unk_token': '<unk>'}
A__ = os.path.join(self.tmpdirname,'bart_tokenizer' )
os.makedirs(lowercase_,exist_ok=lowercase_ )
A__ = os.path.join(lowercase_,BART_VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(lowercase_,BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase_ ) + '\n' )
with open(self.merges_file,'w',encoding='utf-8' ) as fp:
fp.write('\n'.join(lowercase_ ) )
def snake_case__ ( self : List[str] )-> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname,'dpr_tokenizer' ) )
def snake_case__ ( self : List[Any] )-> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname,'bart_tokenizer' ) )
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def snake_case__ ( self : str )-> Any:
'''simple docstring'''
A__ = os.path.join(self.tmpdirname,'rag_tokenizer' )
A__ = RagConfig(question_encoder=DPRConfig().to_dict(),generator=BartConfig().to_dict() )
A__ = RagTokenizer(question_encoder=self.get_dpr_tokenizer(),generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(lowercase_ )
rag_tokenizer.save_pretrained(lowercase_ )
A__ = RagTokenizer.from_pretrained(lowercase_,config=lowercase_ )
self.assertIsInstance(new_rag_tokenizer.question_encoder,lowercase_ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(),rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator,lowercase_ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab(),rag_tokenizer.generator.get_vocab() )
@slow
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ = RagTokenizer.from_pretrained('facebook/rag-token-nq' )
A__ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A__ = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
def snake_case__ ( self : Optional[Any] )-> Dict:
'''simple docstring'''
A__ = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
A__ = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
A__ = tokenizer(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 7
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = 42
snake_case_ = 42
snake_case_ = None
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = 2
@register_to_config
def __init__( self : str ,A : float = 0.02 ,A : float = 1_00 ,A : float = 1.0_07 ,A : float = 80 ,A : float = 0.05 ,A : float = 50 ,):
# standard deviation of the initial noise distribution
__A = sigma_max
# setable values
__A = None
__A = None
__A = None # sigma(t_i)
def UpperCamelCase_ ( self : str ,A : torch.FloatTensor ,A : Optional[int] = None ):
return sample
def UpperCamelCase_ ( self : Dict ,A : int ,A : Union[str, torch.device] = None ):
__A = num_inference_steps
__A = np.arange(0 ,self.num_inference_steps )[::-1].copy()
__A = torch.from_numpy(A ).to(A )
__A = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__A = torch.tensor(A ,dtype=torch.floataa ,device=A )
def UpperCamelCase_ ( self : Union[str, Any] ,A : torch.FloatTensor ,A : float ,A : Optional[torch.Generator] = None ):
if self.config.s_min <= sigma <= self.config.s_max:
__A = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
else:
__A = 0
# sample eps ~ N(0, S_noise^2 * I)
__A = self.config.s_noise * randn_tensor(sample.shape ,generator=A ).to(sample.device )
__A = sigma + gamma * sigma
__A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase_ ( self : Dict ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : bool = True ,):
__A = sample_hat + sigma_hat * model_output
__A = (sample_hat - pred_original_sample) / sigma_hat
__A = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=A ,derivative=A ,pred_original_sample=A )
def UpperCamelCase_ ( self : Optional[int] ,A : torch.FloatTensor ,A : float ,A : float ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : torch.FloatTensor ,A : bool = True ,):
__A = sample_prev + sigma_prev * model_output
__A = (sample_prev - pred_original_sample) / sigma_prev
__A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=A ,derivative=A ,pred_original_sample=A )
def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : str ):
raise NotImplementedError()
| 15
| 0
|
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ): # picklable for multiprocessing
return x.sum()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ): # picklable for multiprocessing
return i + 1
@dataclass
class __UpperCamelCase :
A_ = 42
A_ = 42
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = {}
__a : str = []
__a : Union[str, Any] = 1
__a : Tuple = [1, 2]
__a : Tuple = {'a': 1, 'b': 2}
__a : str = {'a': [1, 2], 'b': [3, 4]}
__a : Optional[int] = {'a': {'1': 1}, 'b': 2}
__a : List[str] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
__a : Any = {}
__a : Tuple = []
__a : List[str] = 2
__a : Optional[Any] = [2, 3]
__a : Tuple = {'a': 2, 'b': 3}
__a : str = {'a': [2, 3], 'b': [4, 5]}
__a : Any = {'a': {'1': 2}, 'b': 3}
__a : int = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
__a : Optional[Any] = 2
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
__a : Dict = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
__a : Optional[int] = {'a': 2, 'b': 0, 'c': 2}
__a : List[str] = {
'a': np.eye(2 ).astype(__a ),
'b': np.zeros(3 ).astype(__a ),
'c': np.ones(2 ).astype(__a ),
}
self.assertEqual(map_nested(__a , __a , map_numpy=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__a , __a , map_numpy=__a , num_proc=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a , num_proc=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__a ): # can't pickle a local lambda
map_nested(lambda __a : x + 1 , __a , num_proc=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = {'a': 1, 'b': 2}
__a : Optional[Any] = {'a': 3, 'b': 4}
__a : Tuple = {'a': 5, 'b': 6}
__a : Union[str, Any] = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(__a , __a , __a ) ) , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
class __UpperCamelCase :
A_ = "bar"
__a : List[Any] = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(__a , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
__a : Optional[Any] = {F"""{i}""": i for i in range(_SCREAMING_SNAKE_CASE )}
__a : List[Any] = map_nested(lambda _SCREAMING_SNAKE_CASE : x + 10 , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCamelCase ( lowerCAmelCase_ ):
@require_tf
def __UpperCAmelCase ( self ):
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
__a : Tuple = layers.Dense(2 )
def gen_random_output():
__a : Union[str, Any] = tf.random.uniform((1, 3) )
return model(__a ).numpy()
with temp_seed(42 , set_tensorflow=__a ):
__a : str = gen_random_output()
with temp_seed(42 , set_tensorflow=__a ):
__a : Any = gen_random_output()
__a : Union[str, Any] = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
import torch
def gen_random_output():
__a : List[Any] = torch.nn.Linear(3 , 2 )
__a : Union[str, Any] = torch.rand(1 , 3 )
return model(__a ).detach().numpy()
with temp_seed(42 , set_pytorch=__a ):
__a : List[Any] = gen_random_output()
with temp_seed(42 , set_pytorch=__a ):
__a : List[Any] = gen_random_output()
__a : int = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
__a : Dict = gen_random_output()
with temp_seed(42 ):
__a : Dict = gen_random_output()
__a : Optional[Any] = gen_random_output()
np.testing.assert_equal(__a , __a )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
__a : Union[str, Any] = NestedDataStructure(_SCREAMING_SNAKE_CASE ).data
assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
__a : str = NestedDataStructure(_SCREAMING_SNAKE_CASE ).flatten()
assert output == expected_output
def lowerCamelCase ():
__a : Optional[int] = A(x=1 , y='foobar' )
__a : Union[str, Any] = {'x': 1, 'y': 'foobar'}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
__a : Dict = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
__a : Optional[Any] = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
assert asdict(_SCREAMING_SNAKE_CASE ) == expected_output
with pytest.raises(_SCREAMING_SNAKE_CASE ):
asdict([1, A(x=10 , y='foo' )] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
return text.split()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def lowerCamelCase ():
with Pool(2 ) as pool:
__a : Dict = list(iflatmap_unordered(_SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(_SCREAMING_SNAKE_CASE ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
__a : Dict = list(iflatmap_unordered(_SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(_SCREAMING_SNAKE_CASE ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
__a : Dict = []
for yield_time, content in iflatmap_unordered(
_SCREAMING_SNAKE_CASE , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(_SCREAMING_SNAKE_CASE )
assert out.count('a' ) == 2
assert out.count('b' ) == 2
assert len(_SCREAMING_SNAKE_CASE ) == 4
| 362
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : bool = True ):
__a : int = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__a : Any = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[Any] = model
__a : Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : Union[str, Any] = model.module
if not keep_fpaa_wrapper:
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
__a : str = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
__a : Any = forward.__wrapped__
if forward == original_forward:
break
__a : str = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
__a : List[str] = model
__a : Optional[int] = compiled_model
return model
def lowerCamelCase ():
PartialState().wait_for_everyone()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def lowerCamelCase (**_SCREAMING_SNAKE_CASE : Tuple ):
for key, value in kwargs.items():
__a : Optional[int] = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
__a : List[Any] = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a : int = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a : Tuple = value
return destination
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int = None ):
if port is None:
__a : List[str] = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 294
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class _lowerCamelCase:
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
pass
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = np.abs((a - b)).max()
self.assertLessEqual(lowerCamelCase, lowerCamelCase, F'''Difference between torch and flax is {diff} (>= {tol}).''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Any = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : str = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase , _lowercase : Tuple = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : List[str] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : List[str] = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase)
_lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : Tuple = model(input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase)
_lowercase : str = after_output[0]
_lowercase : Optional[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, **lowerCamelCase) -> str:
"""simple docstring"""
_lowercase , _lowercase : Any = self.get_vision_text_model(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
_lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCamelCase)
_lowercase : Tuple = model(
input_ids=lowerCamelCase, pixel_values=lowerCamelCase, attention_mask=lowerCamelCase, output_attentions=lowerCamelCase)
_lowercase : int = output.vision_model_output.attentions
self.assertEqual(len(lowerCamelCase), vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase : Optional[Any] = to_atuple(vision_model.config.image_size)
_lowercase : Any = to_atuple(vision_model.config.patch_size)
_lowercase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_lowercase : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
_lowercase : List[str] = output.text_model_output.attentions
self.assertEqual(len(lowerCamelCase), text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
pt_model.to(lowerCamelCase)
pt_model.eval()
# prepare inputs
_lowercase : Any = inputs_dict
_lowercase : Optional[int] = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
_lowercase : Tuple = pt_model(**lowerCamelCase).to_tuple()
_lowercase : Any = fx_model(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_pt=lowerCamelCase)
_lowercase : List[Any] = fx_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output.numpy(), 4E-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = VisionTextDualEncoderModel.from_pretrained(lowerCamelCase, from_flax=lowerCamelCase)
pt_model_loaded.to(lowerCamelCase)
pt_model_loaded.eval()
with torch.no_grad():
_lowercase : Optional[Any] = pt_model_loaded(**lowerCamelCase).to_tuple()
self.assertEqual(len(lowerCamelCase), len(lowerCamelCase), 'Output lengths differ between Flax and PyTorch')
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
self.assert_almost_equals(lowerCamelCase, pt_output_loaded.numpy(), 4E-2)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple:
"""simple docstring"""
_lowercase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Optional[Any] = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : str = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Tuple = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase)
_lowercase : List[Any] = fx_state
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCamelCase, lowerCamelCase)
_lowercase : Tuple = VisionTextDualEncoderModel(lowerCamelCase)
_lowercase : Optional[int] = FlaxVisionTextDualEncoderModel(lowerCamelCase)
_lowercase : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase, fx_model.params)
self.check_pt_flax_equivalence(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowerCamelCase)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowerCamelCase)
@is_pt_flax_cross_test
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase : List[str] = config_inputs_dict.pop('vision_config')
_lowercase : str = config_inputs_dict.pop('text_config')
_lowercase : int = config_inputs_dict
self.check_equivalence_pt_to_flax(lowerCamelCase, lowerCamelCase, lowerCamelCase)
self.check_equivalence_flax_to_pt(lowerCamelCase, lowerCamelCase, lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase , _lowercase : Optional[Any] = self.get_pretrained_model_and_inputs()
_lowercase : Optional[int] = model_a(**lowerCamelCase)
_lowercase : Tuple = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowerCamelCase)
_lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCamelCase)
_lowercase : List[Any] = model_a(**lowerCamelCase)
_lowercase : Tuple = after_outputs[0]
_lowercase : Dict = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCamelCase, 1E-5)
@require_flax
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : List[Any] = 13
_lowercase : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Tuple = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Union[str, Any] = random_attention_mask([batch_size, 4])
_lowercase : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[Any] = FlaxViTModelTester(self)
_lowercase : Any = FlaxBertModelTester(self)
_lowercase : Dict = vit_model_tester.prepare_config_and_inputs()
_lowercase : Any = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : List[str] = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Tuple = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase( _a, unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip', 'hf-internal-testing/tiny-bert', vision_from_pt=lowerCamelCase, text_from_pt=lowerCamelCase, )
_lowercase : Tuple = 13
_lowercase : Any = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
_lowercase : Union[str, Any] = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
_lowercase : Any = random_attention_mask([batch_size, 4])
_lowercase : Dict = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = FlaxCLIPVisionModel(lowerCamelCase)
_lowercase : Optional[Any] = FlaxBertModel(lowerCamelCase)
return vision_model, text_model
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Tuple = FlaxCLIPVisionModelTester(self)
_lowercase : Union[str, Any] = FlaxBertModelTester(self)
_lowercase : Tuple = clip_model_tester.prepare_config_and_inputs()
_lowercase : str = bert_model_tester.prepare_config_and_inputs()
_lowercase , _lowercase : Dict = vision_config_and_inputs
_lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
_lowercase : List[str] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
_lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_lowercase : List[Any] = processor(
text=['una foto di un gatto', 'una foto di un cane'], images=lowerCamelCase, padding=lowerCamelCase, return_tensors='np')
_lowercase : List[Any] = model(**lowerCamelCase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )
_lowercase : Optional[int] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]])
self.assertTrue(np.allclose(outputs.logits_per_image, lowerCamelCase, atol=1E-3))
| 21
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def a ( self : int ) -> Optional[Any]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a ( self : List[Any] ) -> Any:
__lowerCAmelCase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = self._create_example_records()
__lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(SCREAMING_SNAKE_CASE__ ):
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , example_records[i] )
def a ( self : Tuple ) -> List[str]:
__lowerCAmelCase = self._create_example_records()
__lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a ( self : List[str] ) -> List[str]: # checks what happens with missing columns
__lowerCAmelCase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def a ( self : Dict ) -> Optional[int]: # checks if the type can be inferred from the second record
__lowerCAmelCase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__lowerCAmelCase = Dataset.from_list(SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def a ( self : Optional[Any] ) -> Tuple:
__lowerCAmelCase = Dataset.from_list([] )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 229
| 0
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a__( __lowercase , __lowercase , __lowercase ):
@register_to_config
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : float , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : str , __snake_case : bool = False , ):
super().__init__()
a : Optional[int] = nn.Embedding(snake_case_ , snake_case_ )
a : int = nn.Embedding(snake_case_ , snake_case_ )
a : List[str] = False
a : List[str] = nn.Dropout(p=snake_case_ )
a : int = TaConfig(
vocab_size=snake_case_ , d_model=snake_case_ , num_heads=snake_case_ , d_kv=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , feed_forward_proj=snake_case_ , is_decoder=snake_case_ , is_encoder_decoder=snake_case_ , )
a : Optional[int] = nn.ModuleList()
for lyr_num in range(snake_case_ ):
a : Dict = TaBlock(snake_case_ )
self.encoders.append(snake_case_ )
a : str = TaLayerNorm(snake_case_ )
a : Dict = nn.Dropout(p=snake_case_ )
def lowercase_ ( self : Optional[Any] , __snake_case : Any , __snake_case : int ):
a : Tuple = self.token_embedder(snake_case_ )
a : Tuple = encoder_input_tokens.shape[1]
a : str = torch.arange(snake_case_ , device=encoder_input_tokens.device )
x += self.position_encoding(snake_case_ )
a : Any = self.dropout_pre(snake_case_ )
# inverted the attention mask
a : Tuple = encoder_input_tokens.size()
a : List[str] = self.get_extended_attention_mask(snake_case_ , snake_case_ )
for lyr in self.encoders:
a : Tuple = lyr(snake_case_ , snake_case_ )[0]
a : List[str] = self.layer_norm(snake_case_ )
return self.dropout_post(snake_case_ ), encoder_inputs_mask
| 352
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class a__( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Optional[Any] , __snake_case : float , __snake_case : Callable , __snake_case : int , __snake_case : float = 1.0 , __snake_case : str = None , ):
super().__init__()
a : Dict = initial_learning_rate
a : Optional[int] = warmup_steps
a : Tuple = power
a : str = decay_schedule_fn
a : Optional[int] = name
def __call__( self : List[Any] , __snake_case : int ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
a : Optional[int] = tf.cast(__snake_case , tf.floataa )
a : Tuple = tf.cast(self.warmup_steps , tf.floataa )
a : str = global_step_float / warmup_steps_float
a : Union[str, Any] = self.initial_learning_rate * tf.math.pow(__snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__snake_case , )
def lowercase_ ( self : Dict ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def lowerCamelCase__ ( _A , _A , _A , _A = 0.0 , _A = 0.9 , _A = 0.999 , _A = 1E-8 , _A = None , _A = None , _A = 0.0 , _A = 1.0 , _A = None , ):
a : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_A , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_A , )
if num_warmup_steps:
a : Tuple = WarmUp(
initial_learning_rate=_A , decay_schedule_fn=_A , warmup_steps=_A , )
if weight_decay_rate > 0.0:
a : List[Any] = AdamWeightDecay(
learning_rate=_A , weight_decay_rate=_A , beta_a=_A , beta_a=_A , epsilon=_A , clipnorm=_A , global_clipnorm=_A , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=_A , )
else:
a : Tuple = tf.keras.optimizers.Adam(
learning_rate=_A , beta_a=_A , beta_a=_A , epsilon=_A , clipnorm=_A , global_clipnorm=_A , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , __snake_case : float = 0.9 , __snake_case : float = 0.999 , __snake_case : float = 1e-7 , __snake_case : bool = False , __snake_case : float = 0.0 , __snake_case : Optional[List[str]] = None , __snake_case : Optional[List[str]] = None , __snake_case : str = "AdamWeightDecay" , **__snake_case : Optional[int] , ):
super().__init__(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
a : Tuple = weight_decay_rate
a : Optional[Any] = include_in_weight_decay
a : Any = exclude_from_weight_decay
@classmethod
def lowercase_ ( cls : Any , __snake_case : Optional[Any] ):
a : Any = {'WarmUp': WarmUp}
return super(__snake_case , cls ).from_config(__snake_case , custom_objects=__snake_case )
def lowercase_ ( self : str , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Optional[int] ):
super(__snake_case , self )._prepare_local(__snake_case , __snake_case , __snake_case )
a : List[str] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowercase_ ( self : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : str ):
a : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowercase_ ( self : List[str] , __snake_case : List[str] , __snake_case : Any=None , **__snake_case : Tuple ):
a , a : Dict = list(zip(*__snake_case ) )
return super(__snake_case , self ).apply_gradients(zip(__snake_case , __snake_case ) , name=__snake_case , **__snake_case )
def lowercase_ ( self : List[Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
a : List[Any] = apply_state or {}
a : Optional[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
a : int = self._fallback_apply_state(__snake_case , __snake_case )
a : Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowercase_ ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Tuple=None ):
a , a : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
a : List[Any] = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_dense(__snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Any , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : int=None ):
a , a : Tuple = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
a : int = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_sparse(__snake_case , __snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : Union[str, Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowercase_ ( self : List[str] , __snake_case : Dict ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return False
return True
class a__( lowerCamelCase__ ):
def __init__( self : List[str] ):
a : List[Any] = []
a : Optional[int] = None
@property
def lowercase_ ( self : Any ):
if self._accum_steps is None:
a : List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowercase_ ( self : Optional[int] ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[Any] , __snake_case : Any ):
if not self._gradients:
a : Union[str, Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__snake_case ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__snake_case ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__snake_case )}""" )
for accum_gradient, gradient in zip(self._gradients , __snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__snake_case )
self._accum_steps.assign_add(1 )
def lowercase_ ( self : Tuple ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__snake_case ) )
| 96
| 0
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
A_ = logging.get_logger(__name__)
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Optional[int], a_: int=None, **a_: Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""", a_, )
super().__init__(args=a_, **a_ )
| 64
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
lowercase__ : Any = parser.parse_args()
lowercase__ : Union[str, Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowercase__ : List[str] = CLIPImageProcessor()
lowercase__ : Optional[Any] = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
lowercase__ : Optional[Any] = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 328
| 0
|
import string
import numpy
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : int ):
return b if a == 0 else greatest_common_divisor(b % a , __lowerCamelCase )
class UpperCAmelCase :
A__ : Tuple = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
A__ : Optional[Any] = numpy.vectorize(lambda A_ : x % 36 )
A__ : Union[str, Any] = numpy.vectorize(A_ )
def __init__(self : Union[str, Any] , snake_case__ : numpy.ndarray ) -> None:
'''simple docstring'''
snake_case : Optional[int] = self.modulus(snake_case__ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
snake_case : Optional[int] = encrypt_key.shape[0]
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> int:
'''simple docstring'''
return self.key_string.index(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : int ) -> str:
'''simple docstring'''
return self.key_string[round(snake_case__ )]
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> None:
'''simple docstring'''
snake_case : Optional[int] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
snake_case : int = det % len(self.key_string )
snake_case : Optional[int] = len(self.key_string )
if greatest_common_divisor(snake_case__ , len(self.key_string ) ) != 1:
snake_case : str = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : str ) -> str:
'''simple docstring'''
snake_case : List[str] = [char for char in text.upper() if char in self.key_string]
snake_case : Tuple = chars[-1]
while len(snake_case__ ) % self.break_key != 0:
chars.append(snake_case__ )
return "".join(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : str ) -> str:
'''simple docstring'''
snake_case : Any = self.process_text(text.upper() )
snake_case : Optional[int] = ""
for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ):
snake_case : List[Any] = text[i : i + self.break_key]
snake_case : Union[str, Any] = [self.replace_letters(snake_case__ ) for char in batch]
snake_case : Tuple = numpy.array([vec] ).T
snake_case : Optional[Any] = self.modulus(self.encrypt_key.dot(snake_case__ ) ).T.tolist()[
0
]
snake_case : List[Any] = "".join(
self.replace_digits(snake_case__ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> numpy.ndarray:
'''simple docstring'''
snake_case : Tuple = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
snake_case : int = det % len(self.key_string )
snake_case : Any = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
snake_case : str = i
break
snake_case : int = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str:
'''simple docstring'''
snake_case : str = self.make_decrypt_key()
snake_case : Dict = self.process_text(text.upper() )
snake_case : Optional[Any] = ""
for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ):
snake_case : str = text[i : i + self.break_key]
snake_case : List[Any] = [self.replace_letters(snake_case__ ) for char in batch]
snake_case : List[str] = numpy.array([vec] ).T
snake_case : Optional[int] = self.modulus(decrypt_key.dot(snake_case__ ) ).T.tolist()[0]
snake_case : int = "".join(
self.replace_digits(snake_case__ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def UpperCamelCase ( ):
snake_case : Optional[int] = int(input("Enter the order of the encryption key: " ) )
snake_case : Union[str, Any] = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(__lowerCamelCase ):
snake_case : Union[str, Any] = [int(__lowerCamelCase ) for x in input().split()]
hill_matrix.append(__lowerCamelCase )
snake_case : Any = HillCipher(numpy.array(__lowerCamelCase ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
snake_case : Tuple = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
snake_case : Any = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(__lowerCamelCase ) )
elif option == "2":
snake_case : Optional[int] = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(__lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 10
|
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Tuple ):
snake_case : Optional[Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()]
snake_case : Union[str, Any] = [x.strip() for x in open(__lowerCamelCase ).readlines()][: len(__lowerCamelCase )]
snake_case : List[Any] = calculate_rouge(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
if save_path is not None:
save_json(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 10
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''llama'''
__snake_case = ['''past_key_values''']
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any]=32_000 , __UpperCAmelCase : str=4_096 , __UpperCAmelCase : int=11_008 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Union[str, Any]="silu" , __UpperCAmelCase : Tuple=2_048 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Any=1e-6 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[int]=0 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Tuple , ) ->str:
"""simple docstring"""
a = vocab_size
a = max_position_embeddings
a = hidden_size
a = intermediate_size
a = num_hidden_layers
a = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
a = num_attention_heads
a = num_key_value_heads
a = hidden_act
a = initializer_range
a = rms_norm_eps
a = pretraining_tp
a = use_cache
a = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F"""got {self.rope_scaling}""" )
a = self.rope_scaling.get('''type''' , __UpperCAmelCase )
a = self.rope_scaling.get('''factor''' , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 0
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> int:
# Initialise PyTorch model
UpperCamelCase : Any = MobileBertConfig.from_json_file(snake_case__ )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCamelCase : Tuple = MobileBertForPreTraining(snake_case__ )
# Load weights from tf checkpoint
UpperCamelCase : int = load_tf_weights_in_mobilebert(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 119
| 0
|
"""simple docstring"""
import datasets
__UpperCamelCase : List[Any] = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
__UpperCamelCase : Tuple = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
__UpperCamelCase : Optional[int] = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='''numpy''' ,)
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : Dict ):
return {"accuracy": simple_accuracy(lowercase_ ,lowercase_ )}
| 74
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : int = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __SCREAMING_SNAKE_CASE ( A_ ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCAmelCase__ : str = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(A_ , id=A_ )
| 74
| 1
|
def lowerCAmelCase_ ( __A, __A ) -> float:
'''simple docstring'''
def get_matched_characters(__A, __A ) -> str:
UpperCAmelCase__ = []
UpperCAmelCase__ = min(len(_stra ), len(_stra ) ) // 2
for i, l in enumerate(_stra ):
UpperCAmelCase__ = int(max(0, i - limit ) )
UpperCAmelCase__ = int(min(i + limit + 1, len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__A )
UpperCAmelCase__ = f"""{_stra[0:_stra.index(__A )]} {_stra[_stra.index(__A ) + 1:]}"""
return "".join(__A )
# matching characters
UpperCAmelCase__ = get_matched_characters(__A, __A )
UpperCAmelCase__ = get_matched_characters(__A, __A )
UpperCAmelCase__ = len(__A )
# transposition
UpperCAmelCase__ = (
len([(ca, ca) for ca, ca in zip(__A, __A ) if ca != ca] ) // 2
)
if not match_count:
UpperCAmelCase__ = 0.0
else:
UpperCAmelCase__ = (
1
/ 3
* (
match_count / len(__A )
+ match_count / len(__A )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
UpperCAmelCase__ = 0
for ca, ca in zip(stra[:4], stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 65
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : List[str] = 'openai-gpt'
_a : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
| 329
| 0
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ : Union[str, Any] = 1.5
lowercase_ : str = int(factor * num_class_images )
lowercase_ : Union[str, Any] = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=__SCREAMING_SNAKE_CASE )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : Any = client.query(text=__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowercase_ : Dict = int(factor * num_images )
lowercase_ : Optional[int] = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , )
lowercase_ : Optional[Any] = 0
lowercase_ : Union[str, Any] = 0
lowercase_ : int = tqdm(desc='downloading real regularization images' , total=__SCREAMING_SNAKE_CASE )
with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open(
F'''{class_data_dir}/images.txt''' , 'w' ) as fa:
while total < num_class_images:
lowercase_ : int = class_images[count]
count += 1
try:
lowercase_ : str = requests.get(images['url'] )
if img.status_code == 2_00:
lowercase_ : List[Any] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f:
f.write(img.content )
fa.write(images['caption'] + '\n' )
fa.write(images['url'] + '\n' )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase__( ):
lowercase_ : Tuple = argparse.ArgumentParser('' , add_help=__SCREAMING_SNAKE_CASE )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
parser.add_argument('--class_data_dir' , help='path to save images' , required=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 321
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 321
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = ["image_processor", "tokenizer"]
A_ = "BlipImageProcessor"
A_ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , __a , __a ):
'''simple docstring'''
__a : List[str] = False
super().__init__(__A , __A )
__a : Dict = self.image_processor
def __call__( self , __a = None , __a = None , __a = True , __a = False , __a = None , __a = None , __a = 0 , __a = None , __a = None , __a = False , __a = False , __a = False , __a = False , __a = False , __a = True , __a = None , **__a , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__a : Optional[int] = self.tokenizer
__a : Any = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
return text_encoding
# add pixel_values
__a : int = self.image_processor(__A , return_tensors=__A )
if text is not None:
__a : Optional[int] = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
else:
__a : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(__A )
return encoding_image_processor
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__A , **__A )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
return self.tokenizer.decode(*__A , **__A )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.tokenizer.model_input_names
__a : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 27
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283
| 0
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __lowerCamelCase ( ) -> int:
UpperCAmelCase : List[Any] = 9, 1_4 # noqa: F841
UpperCAmelCase : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
UpperCAmelCase : List[Any] = defaultdict(_lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
UpperCAmelCase : str = mst(_lowercase )
UpperCAmelCase : int = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
UpperCAmelCase : List[str] = tuple(answer[:2] )
UpperCAmelCase : str = tuple(edge[::-1] )
assert edge in result or reverse in result
| 368
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
a : List[str] = """Hello, World!"""
a : List[Any] = """en_XX"""
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : Dict = Path("""data_bin""" )
UpperCAmelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowercase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowercase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowercase )
UpperCAmelCase : List[str] = xmod.model.encoder.sentence_encoder
UpperCAmelCase : Tuple = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCAmelCase : List[str] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowercase )
UpperCAmelCase : str = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase : int = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase : Optional[int] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase : List[str] = model.roberta.encoder.layer[i]
UpperCAmelCase : Optional[Any] = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase : Optional[Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
UpperCAmelCase : List[Any] = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase : Optional[int] = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase : Any = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase : Optional[int] = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase : int = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
UpperCAmelCase : Any = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase : List[str] = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase : int = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase : str = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase : Tuple = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
UpperCAmelCase : List[str] = xmod_layer.fca.weight
UpperCAmelCase : str = xmod_layer.fca.bias
# output
UpperCAmelCase : Any = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
UpperCAmelCase : Dict = xmod_layer.fca.weight
UpperCAmelCase : Dict = xmod_layer.fca.bias
UpperCAmelCase : Any = xmod_layer.final_layer_norm.weight
UpperCAmelCase : Union[str, Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase : str = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase : List[str] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase : List[Any] = bert_output.adapter_modules[lang_code]
UpperCAmelCase : Dict = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase : Any = from_adapter.fca.weight
UpperCAmelCase : int = from_adapter.fca.bias
UpperCAmelCase : Dict = from_adapter.fca.weight
UpperCAmelCase : Dict = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase : Tuple = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].dense.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].dense.bias
UpperCAmelCase : str = xmod.model.classification_heads["""mnli"""].out_proj.weight
UpperCAmelCase : Tuple = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
UpperCAmelCase : Dict = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase : str = xmod.model.encoder.lm_head.weight
UpperCAmelCase : str = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase : Any = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
UpperCAmelCase : Optional[int] = model(_lowercase )[0]
if classification_head:
UpperCAmelCase : List[Any] = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowercase ) )
else:
UpperCAmelCase : Optional[Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase : Tuple = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
UpperCAmelCase : Dict = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
a : List[str] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 338
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
# ===== initialization =====
__lowercase = Mock()
__lowercase = conn, Mock()
__lowercase = iter([1, None] )
__lowercase = lambda SCREAMING_SNAKE_CASE : next(SCREAMING_SNAKE_CASE )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=SCREAMING_SNAKE_CASE )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 325
|
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
__lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowercase = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowercase = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case :
def __init__( self : Any , A : Any , A : int=1_3 , A : str=3_0 , A : Optional[int]=2 , A : Optional[Any]=3 , A : List[Any]=True , A : Any=True , A : Dict=3_2 , A : List[Any]=5 , A : Dict=4 , A : Any=3_7 , A : Tuple="gelu" , A : List[str]=0.1 , A : Optional[int]=0.1 , A : List[str]=1_0 , A : Dict=0.02 , A : Optional[int]=None , ):
'''simple docstring'''
a : int = parent
a : Optional[int] = batch_size
a : Dict = image_size
a : Optional[Any] = patch_size
a : Any = num_channels
a : Any = is_training
a : Dict = use_labels
a : Any = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = intermediate_size
a : List[str] = hidden_act
a : Dict = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : List[str] = type_sequence_label_size
a : int = initializer_range
a : List[str] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : int = num_patches + 1
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a : Tuple = None
if self.use_labels:
a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : str , A : int , A : Any , A : Dict ):
'''simple docstring'''
a : Optional[Any] = ViTMSNModel(config=A )
model.to(A )
model.eval()
a : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : str , A : Dict , A : Union[str, Any] , A : List[str] ):
'''simple docstring'''
a : int = self.type_sequence_label_size
a : Dict = ViTMSNForImageClassification(A )
model.to(A )
model.eval()
a : Dict = model(A , labels=A )
print('Pixel and labels shape: {pixel_values.shape}, {labels.shape}' )
print('Labels: {labels}' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a : Union[str, Any] = 1
a : List[str] = ViTMSNForImageClassification(A )
model.to(A )
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a : Tuple = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : Optional[int] = self.prepare_config_and_inputs()
a, a, a : int = config_and_inputs
a : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
__magic_name__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : Union[str, Any] = ViTMSNModelTester(self )
a : Dict = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds' )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a, a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : str = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
a, a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : str = model_class(A )
a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , A )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = ViTMSNModel.from_pretrained(A )
self.assertIsNotNone(A )
def snake_case ():
'''simple docstring'''
a : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('facebook/vit-msn-small' ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(2 )
a : List[str] = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small' ).to(A )
a : Tuple = self.default_image_processor
a : int = prepare_img()
a : Optional[Any] = image_processor(images=A , return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
a : str = model(**A )
# verify the logits
a : str = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
a : Tuple = torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
| 186
|
"""simple docstring"""
def snake_case (A_ :int ):
'''simple docstring'''
if isinstance(A_ , A_ ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if isinstance(A_ , A_ ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if num == 0:
return "0b0"
a : List[Any] = False
if num < 0:
a : Optional[int] = True
a : Dict = -num
a : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(A_ ) for e in binary )
return "0b" + "".join(str(A_ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_UpperCamelCase = TypeVar("T")
def _lowercase ( lowercase__ ):
return (position - 1) // 2
def _lowercase ( lowercase__ ):
return (2 * position) + 1
def _lowercase ( lowercase__ ):
return (2 * position) + 2
class __lowercase (Generic[T] ):
def __init__( self ) ->None:
'''simple docstring'''
__lowerCAmelCase : list[tuple[T, int]] = []
__lowerCAmelCase : dict[T, int] = {}
__lowerCAmelCase : int = 0
def __len__( self ) ->int:
'''simple docstring'''
return self.elements
def __repr__( self ) ->str:
'''simple docstring'''
return str(self.heap )
def UpperCamelCase__ ( self ) ->bool:
'''simple docstring'''
return self.elements == 0
def UpperCamelCase__ ( self , A_ , A_ ) ->None:
'''simple docstring'''
self.heap.append((elem, weight) )
__lowerCAmelCase : Any = self.elements
self.elements += 1
self._bubble_up(A_ )
def UpperCamelCase__ ( self ) ->T:
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCAmelCase, __lowerCAmelCase : int = self.heap[0]
self._bubble_down(A_ )
return elem
def UpperCamelCase__ ( self , A_ , A_ ) ->None:
'''simple docstring'''
__lowerCAmelCase : str = self.position_map[elem]
__lowerCAmelCase : Optional[int] = (elem, weight)
if position > 0:
__lowerCAmelCase : Optional[Any] = get_parent_position(A_ )
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(A_ )
else:
self._bubble_down(A_ )
else:
self._bubble_down(A_ )
def UpperCamelCase__ ( self , A_ ) ->None:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCAmelCase : Union[str, Any] = get_parent_position(A_ )
__lowerCAmelCase, __lowerCAmelCase : Dict = self.heap[curr_pos]
__lowerCAmelCase, __lowerCAmelCase : str = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(A_ , A_ )
return self._bubble_up(A_ )
return None
def UpperCamelCase__ ( self , A_ ) ->None:
'''simple docstring'''
__lowerCAmelCase : Dict = self.position_map[elem]
__lowerCAmelCase, __lowerCAmelCase : Tuple = self.heap[curr_pos]
__lowerCAmelCase : Union[str, Any] = get_child_left_position(A_ )
__lowerCAmelCase : int = get_child_right_position(A_ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCAmelCase, __lowerCAmelCase : Optional[Any] = self.heap[child_left_position]
__lowerCAmelCase, __lowerCAmelCase : Optional[int] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(A_ , A_ )
return self._bubble_down(A_ )
if child_left_position < self.elements:
__lowerCAmelCase, __lowerCAmelCase : Dict = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(A_ , A_ )
return self._bubble_down(A_ )
else:
return None
if child_right_position < self.elements:
__lowerCAmelCase, __lowerCAmelCase : Tuple = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(A_ , A_ )
return self._bubble_down(A_ )
return None
def UpperCamelCase__ ( self , A_ , A_ ) ->None:
'''simple docstring'''
__lowerCAmelCase : Dict = self.heap[nodea_pos][0]
__lowerCAmelCase : Optional[int] = self.heap[nodea_pos][0]
__lowerCAmelCase, __lowerCAmelCase : int = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowerCAmelCase : Dict = nodea_pos
__lowerCAmelCase : List[str] = nodea_pos
class __lowercase (Generic[T] ):
def __init__( self ) ->None:
'''simple docstring'''
__lowerCAmelCase : dict[T, dict[T, int]] = {}
__lowerCAmelCase : int = 0
def __repr__( self ) ->str:
'''simple docstring'''
return str(self.connections )
def __len__( self ) ->int:
'''simple docstring'''
return self.nodes
def UpperCamelCase__ ( self , A_ ) ->None:
'''simple docstring'''
if node not in self.connections:
__lowerCAmelCase : str = {}
self.nodes += 1
def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->None:
'''simple docstring'''
self.add_node(A_ )
self.add_node(A_ )
__lowerCAmelCase : Dict = weight
__lowerCAmelCase : str = weight
def _lowercase ( lowercase__ , ):
__lowerCAmelCase : dict[T, int] = {node: maxsize for node in graph.connections}
__lowerCAmelCase : dict[T, T | None] = {node: None for node in graph.connections}
__lowerCAmelCase : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowercase__ , lowercase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCAmelCase : str = priority_queue.extract_min()
__lowerCAmelCase : Tuple = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCAmelCase : Tuple = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowercase__ , dist[neighbour] )
__lowerCAmelCase : Tuple = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCAmelCase : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCAmelCase : Any = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowercase__ , dist[neighbour] )
__lowerCAmelCase : Optional[int] = node
return dist, parent
| 275
|
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase (_UpperCAmelCase ):
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=False , A_=True , A_="None" , A_=3 , A_=4 , A_=None , ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : List[str] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : List[Any] = is_training
__lowerCAmelCase : List[Any] = use_input_mask
__lowerCAmelCase : Optional[int] = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : int = hidden_size
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Dict = intermediate_size
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : Optional[int] = initializer_range
__lowerCAmelCase : int = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : List[str] = relative_attention
__lowerCAmelCase : Union[str, Any] = position_biased_input
__lowerCAmelCase : int = pos_att_type
__lowerCAmelCase : List[Any] = scope
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : int = None
if self.use_input_mask:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__lowerCAmelCase : List[str] = None
if self.use_token_type_ids:
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : int = None
__lowerCAmelCase : List[str] = None
if self.use_labels:
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.get_config()
__lowerCAmelCase : Dict = 300
return config
def UpperCamelCase__ ( self , A_ ) ->Union[str, Any]:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = DebertaModel(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : str = model(A_ , attention_mask=A_ , token_type_ids=A_ )[0]
__lowerCAmelCase : Any = model(A_ , token_type_ids=A_ )[0]
__lowerCAmelCase : List[str] = model(A_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = DebertaForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Any = self.num_labels
__lowerCAmelCase : Tuple = DebertaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Union[str, Any] = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(A_ )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.num_labels
__lowerCAmelCase : Optional[int] = DebertaForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : Tuple = model(A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) ->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = DebertaForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
__lowerCAmelCase : int = model(
A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
), (
__lowerCAmelCase
),
) : Tuple = config_and_inputs
__lowerCAmelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : int = DebertaModelTester(self )
__lowerCAmelCase : List[Any] = ConfigTester(self , config_class=A_ , hidden_size=37 )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*A_ )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ )
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*A_ )
def UpperCamelCase__ ( self ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*A_ )
@slow
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = DebertaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
pass
@slow
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : str = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase : Tuple = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__lowerCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowerCAmelCase : Optional[int] = model(A_ , attention_mask=A_ )[0]
# compare the actual values for a slice.
__lowerCAmelCase : Optional[Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 275
| 1
|
'''simple docstring'''
from math import isqrt, loga
def _lowerCAmelCase ( lowercase ) -> list[int]:
__lowerCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowercase , lowercase ):
__lowerCAmelCase = False
return [i for i in range(2 , lowercase ) if is_prime[i]]
def _lowerCAmelCase ( lowercase = 80_0800 , lowercase = 80_0800 ) -> int:
__lowerCAmelCase = degree * loga(lowercase )
__lowerCAmelCase = int(lowercase )
__lowerCAmelCase = calculate_prime_numbers(lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = len(lowercase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'{solution() = }')
| 46
|
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_a : List[Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {
"""tensor(bool)""": np.bool_,
"""tensor(int8)""": np.inta,
"""tensor(uint8)""": np.uinta,
"""tensor(int16)""": np.intaa,
"""tensor(uint16)""": np.uintaa,
"""tensor(int32)""": np.intaa,
"""tensor(uint32)""": np.uintaa,
"""tensor(int64)""": np.intaa,
"""tensor(uint64)""": np.uintaa,
"""tensor(float16)""": np.floataa,
"""tensor(float)""": np.floataa,
"""tensor(double)""": np.floataa,
}
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE=None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__lowerCAmelCase = model
__lowerCAmelCase = kwargs.get("""model_save_dir""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = kwargs.get("""latest_model_name""",__SCREAMING_SNAKE_CASE )
def __call__( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {k: np.array(__SCREAMING_SNAKE_CASE ) for k, v in kwargs.items()}
return self.model.run(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__lowerCAmelCase = """CPUExecutionProvider"""
return ort.InferenceSession(__SCREAMING_SNAKE_CASE,providers=[provider],sess_options=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(__SCREAMING_SNAKE_CASE )
try:
shutil.copyfile(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCAmelCase = self.model_save_dir.joinpath(__SCREAMING_SNAKE_CASE )
if src_path.exists():
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).joinpath(__SCREAMING_SNAKE_CASE )
try:
shutil.copyfile(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
except shutil.SameFileError:
pass
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
logger.error(f'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(__SCREAMING_SNAKE_CASE,exist_ok=__SCREAMING_SNAKE_CASE )
# saving model weights/files
self._save_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ),provider=__SCREAMING_SNAKE_CASE,sess_options=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE )
# load model from hub
else:
# download model
__lowerCAmelCase = hf_hub_download(
repo_id=__SCREAMING_SNAKE_CASE,filename=__SCREAMING_SNAKE_CASE,use_auth_token=__SCREAMING_SNAKE_CASE,revision=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,force_download=__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).parent
__lowerCAmelCase = Path(__SCREAMING_SNAKE_CASE ).name
__lowerCAmelCase = OnnxRuntimeModel.load_model(__SCREAMING_SNAKE_CASE,provider=__SCREAMING_SNAKE_CASE,sess_options=__SCREAMING_SNAKE_CASE )
return cls(model=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase__ ( cls,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = None
if len(str(__SCREAMING_SNAKE_CASE ).split("""@""" ) ) == 2:
__lowerCAmelCase , __lowerCAmelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=__SCREAMING_SNAKE_CASE,revision=__SCREAMING_SNAKE_CASE,cache_dir=__SCREAMING_SNAKE_CASE,force_download=__SCREAMING_SNAKE_CASE,use_auth_token=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
| 46
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
snake_case : Dict = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
snake_case : int = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert("RGB" )
return image
def __lowerCAmelCase ( lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case : Dict = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( lowercase : str , lowercase : Dict , lowercase : Tuple ) -> Optional[Any]:
"""simple docstring"""
snake_case : List[str] = dct.pop(lowercase )
snake_case : Union[str, Any] = val
def __lowerCAmelCase ( lowercase : int , lowercase : Dict ) -> Optional[int]:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
snake_case : Optional[int] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
snake_case : Optional[int] = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
snake_case : Optional[int] = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
snake_case : Optional[int] = qkv_bias
def __lowerCAmelCase ( lowercase : Tuple , lowercase : int ) -> List[str]:
"""simple docstring"""
snake_case : List[Any] = 364 if "coco" in model_name else 224
snake_case : Any = BlipaVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
snake_case : Union[str, Any] = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=lowercase ).to_dict()
elif "opt-6.7b" in model_name:
snake_case : Dict = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=lowercase ).to_dict()
elif "t5-xl" in model_name:
snake_case : Union[str, Any] = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
snake_case : Any = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
snake_case : int = BlipaConfig(vision_config=lowercase , text_config=lowercase )
return config, image_size
@torch.no_grad()
def __lowerCAmelCase ( lowercase : Dict , lowercase : List[str]=None , lowercase : str=False ) -> Any:
"""simple docstring"""
snake_case : List[str] = (
AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
if "opt" in model_name
else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
)
snake_case : Optional[int] = tokenizer("\n" , add_special_tokens=lowercase ).input_ids[0]
snake_case ,snake_case : Any = get_blipa_config(lowercase , eos_token_id=lowercase )
snake_case : Any = BlipaForConditionalGeneration(lowercase ).eval()
snake_case : Tuple = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
snake_case ,snake_case : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
snake_case : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
snake_case ,snake_case ,snake_case : Optional[Any] = load_model_and_preprocess(
name=lowercase , model_type=lowercase , is_eval=lowercase , device=lowercase )
original_model.eval()
print("Done!" )
# update state dict keys
snake_case : Tuple = original_model.state_dict()
snake_case : Any = create_rename_keys(lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
snake_case : Optional[int] = state_dict.pop(lowercase )
if key.startswith("Qformer.bert" ):
snake_case : Optional[int] = key.replace("Qformer.bert" , "qformer" )
if "attention.self" in key:
snake_case : Any = key.replace("self" , "attention" )
if "opt_proj" in key:
snake_case : Optional[int] = key.replace("opt_proj" , "language_projection" )
if "t5_proj" in key:
snake_case : str = key.replace("t5_proj" , "language_projection" )
if key.startswith("opt" ):
snake_case : int = key.replace("opt" , "language" )
if key.startswith("t5" ):
snake_case : Any = key.replace("t5" , "language" )
snake_case : Tuple = val
# read in qv biases
read_in_q_v_bias(lowercase , lowercase )
snake_case ,snake_case : Optional[Any] = hf_model.load_state_dict(lowercase , strict=lowercase )
assert len(lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
snake_case : List[str] = load_demo_image()
snake_case : int = vis_processors["eval"](lowercase ).unsqueeze(0 ).to(lowercase )
snake_case : str = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(lowercase )
# create processor
snake_case : Optional[Any] = BlipImageProcessor(
size={"height": image_size, "width": image_size} , image_mean=lowercase , image_std=lowercase )
snake_case : Optional[Any] = BlipaProcessor(image_processor=lowercase , tokenizer=lowercase )
snake_case : Tuple = processor(images=lowercase , return_tensors="pt" ).pixel_values.to(lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase , lowercase )
original_model.to(lowercase )
hf_model.to(lowercase )
with torch.no_grad():
if "opt" in model_name:
snake_case : List[Any] = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
snake_case : Union[str, Any] = hf_model(lowercase , lowercase ).logits
else:
snake_case : List[str] = original_model(
{"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
snake_case : List[str] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
snake_case : int = hf_model(lowercase , lowercase , labels=lowercase ).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
snake_case : str = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowercase )
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
snake_case : Union[str, Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowercase )
else:
# cast to same type
snake_case : List[str] = logits.dtype
assert torch.allclose(original_logits.to(lowercase ) , lowercase , atol=1e-2 )
print("Looks ok!" )
print("Generating a caption..." )
snake_case : Dict = ""
snake_case : List[str] = tokenizer(lowercase , return_tensors="pt" ).input_ids.to(lowercase )
snake_case : Optional[int] = original_model.generate({"image": original_pixel_values} )
snake_case : Optional[int] = hf_model.generate(
lowercase , lowercase , do_sample=lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , lowercase )
snake_case : Optional[Any] = input_ids.shape[1]
snake_case : Optional[int] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase )
snake_case : int = [text.strip() for text in output_text]
print("HF generation:" , lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 203
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=12 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=0 , UpperCamelCase__=None , ) -> Dict:
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Dict = batch_size
snake_case : List[str] = seq_length
snake_case : Dict = is_training
snake_case : Optional[Any] = use_input_mask
snake_case : Optional[int] = use_labels
snake_case : Tuple = vocab_size
snake_case : Optional[Any] = hidden_size
snake_case : Optional[Any] = projection_dim
snake_case : List[Any] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : int = intermediate_size
snake_case : str = dropout
snake_case : List[Any] = attention_dropout
snake_case : Any = max_position_embeddings
snake_case : List[Any] = initializer_range
snake_case : Any = scope
snake_case : Union[str, Any] = bos_token_id
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : int = None
if self.use_input_mask:
snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
snake_case : Tuple = input_mask.numpy()
snake_case ,snake_case : str = input_mask.shape
snake_case : Tuple = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase__ ):
snake_case : int = 1
snake_case : Tuple = 0
snake_case : Union[str, Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase__ )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = TFBlipTextModel(config=UpperCamelCase__ )
snake_case : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , training=UpperCamelCase__ )
snake_case : Optional[int] = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case : Tuple = config_and_inputs
snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Any = (TFBlipTextModel,) if is_tf_available() else ()
__UpperCAmelCase : Any = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = BlipTextModelTester(self )
snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@slow
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[str] = TFBlipTextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__=True ) -> Optional[int]:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase__ )
| 203
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCAmelCase__ : str = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __UpperCamelCase ( _UpperCAmelCase ):
if isinstance(_UpperCAmelCase, torch.Tensor ):
return image
elif isinstance(_UpperCAmelCase, PIL.Image.Image ):
__UpperCAmelCase : Tuple = [image]
__UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
__UpperCAmelCase : Optional[Any] = torch.stack(_UpperCAmelCase )
return image
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
__UpperCAmelCase : List[str] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )
def lowerCamelCase_ ( self : int , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ):
"""simple docstring"""
# get the original timestep using init_timestep
__UpperCAmelCase : int = min(int(num_inference_steps * strength ) , UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
__UpperCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]=None ):
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_ )}" )
__UpperCAmelCase : Optional[int] = image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(UpperCAmelCase_ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
__UpperCAmelCase : str = init_latents.shape
__UpperCAmelCase : Optional[int] = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
# get latents
print("add noise to latents at timestep" , UpperCAmelCase_ )
__UpperCAmelCase : int = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] = None , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
self.check_inputs(UpperCAmelCase_ )
# 2. Preprocess image
__UpperCAmelCase : Dict = preprocess(UpperCAmelCase_ )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCAmelCase_ , device=self.device )
__UpperCAmelCase , __UpperCAmelCase : int = self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device )
__UpperCAmelCase : Optional[Any] = timesteps[:1].repeat(UpperCAmelCase_ )
# 4. Prepare latent variables
__UpperCAmelCase : Optional[Any] = self.prepare_latents(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.unet.dtype , self.device , UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCAmelCase_ ):
# 1. predict noise model_output
__UpperCAmelCase : Union[str, Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__UpperCAmelCase : Any = self.scheduler.step(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , eta=UpperCAmelCase_ , use_clipped_model_output=UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample
__UpperCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase : int = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCAmelCase_ )
| 37
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '''fnet'''
def __init__( self : Tuple , UpperCAmelCase_ : str=32_000 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : str=3_072 , UpperCAmelCase_ : List[str]="gelu_new" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : List[Any]=1e-12 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=512 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : List[Any]=2 , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = hidden_dropout_prob
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
__UpperCAmelCase : List[Any] = tpu_short_seq_length
| 37
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : UNetaDModel
_snake_case : ScoreSdeVeScheduler
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> int:
super().__init__()
self.register_modules(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
@torch.no_grad()
def __call__( self , _UpperCamelCase = 1 , _UpperCamelCase = 2_0_0_0 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , **_UpperCamelCase , ) -> Union[ImagePipelineOutput, Tuple]:
UpperCAmelCase_ : List[Any] = self.unet.config.sample_size
UpperCAmelCase_ : Optional[Any] = (batch_size, 3, img_size, img_size)
UpperCAmelCase_ : Dict = self.unet
UpperCAmelCase_ : Optional[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase ) * self.scheduler.init_noise_sigma
UpperCAmelCase_ : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(_UpperCamelCase )
self.scheduler.set_sigmas(_UpperCamelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase_ : str = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase_ : str = self.unet(_UpperCamelCase , _UpperCamelCase ).sample
UpperCAmelCase_ : Tuple = self.scheduler.step_correct(_UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase ).prev_sample
# prediction step
UpperCAmelCase_ : str = model(_UpperCamelCase , _UpperCamelCase ).sample
UpperCAmelCase_ : List[Any] = self.scheduler.step_pred(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = output.prev_sample, output.prev_sample_mean
UpperCAmelCase_ : Any = sample_mean.clamp(0 , 1 )
UpperCAmelCase_ : Optional[Any] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Tuple = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = ''
SCREAMING_SNAKE_CASE__ = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
super().__init__(self , **_lowerCamelCase )
a :Union[str, Any] = repo_info
a :int = token
a :int = None
def SCREAMING_SNAKE_CASE__ ( self ):
if self.dir_cache is None:
a :Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a :List[Any] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , **_lowerCamelCase , ):
if not isinstance(self.repo_info , _lowerCamelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
a :Optional[int] = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ):
self._get_dirs()
a :Union[str, Any] = self._strip_protocol(_lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ):
self._get_dirs()
a :str = PurePosixPath(path.strip('''/''' ) )
a :Tuple = {}
for p, f in self.dir_cache.items():
a :Optional[int] = PurePosixPath(p.strip('''/''' ) )
a :str = p.parent
if root == path:
a :List[str] = f
a :Any = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 94
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a =16
a =32
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ) -> int:
__lowerCamelCase : Any = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCamelCase : Dict = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCamelCase : Any = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCamelCase : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCamelCase : Tuple = 1_6
elif accelerator.mixed_precision != "no":
__lowerCamelCase : Union[str, Any] = 8
else:
__lowerCamelCase : Optional[int] = None
return tokenizer.pad(
lowerCamelCase__ , padding='longest' , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
__lowerCamelCase : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
__lowerCamelCase : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a =mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCamelCase__ ) == "1":
__lowerCamelCase : Tuple = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__lowerCamelCase : Dict = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__lowerCamelCase : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase : Tuple = config['lr']
__lowerCamelCase : str = int(config['num_epochs'] )
__lowerCamelCase : Tuple = int(config['seed'] )
__lowerCamelCase : Union[str, Any] = int(config['batch_size'] )
set_seed(lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase : int = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Dict = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__lowerCamelCase : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowerCamelCase : List[str] = batch_size // MAX_GPU_BATCH_SIZE
__lowerCamelCase : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase : str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase : Optional[int] = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
__lowerCamelCase : List[str] = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__lowerCamelCase : Any = os.path.split(lowerCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__lowerCamelCase : Optional[Any] = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowerCamelCase : Tuple = model(**lowerCamelCase__ )
__lowerCamelCase : Dict = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__lowerCamelCase : str = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase : Any = model(**lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 )
__lowerCamelCase , __lowerCamelCase : int = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
__lowerCamelCase : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(lowerCamelCase__ ),
'epoch': epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
__lowerCamelCase : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=lowerCamelCase__ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
__lowerCamelCase : Optional[Any] = parser.parse_args()
__lowerCamelCase : Dict = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 113
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a ={
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 113
| 1
|
import string
import numpy
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , __a )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowercase_ = numpy.vectorize(lambda __SCREAMING_SNAKE_CASE : x % 36 )
lowercase_ = numpy.vectorize(__SCREAMING_SNAKE_CASE )
def __init__(self : Union[str, Any] , UpperCAmelCase_ : numpy.ndarray) ->None:
'''simple docstring'''
lowerCamelCase__: int =self.modulus(UpperCAmelCase_) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCamelCase__: Dict =encrypt_key.shape[0]
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str) ->int:
'''simple docstring'''
return self.key_string.index(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int) ->str:
'''simple docstring'''
return self.key_string[round(UpperCAmelCase_)]
def SCREAMING_SNAKE_CASE_ (self : int) ->None:
'''simple docstring'''
lowerCamelCase__: str =round(numpy.linalg.det(self.encrypt_key))
if det < 0:
lowerCamelCase__: str =det % len(self.key_string)
lowerCamelCase__: Optional[int] =len(self.key_string)
if greatest_common_divisor(UpperCAmelCase_ , len(self.key_string)) != 1:
lowerCamelCase__: Tuple =(
F"""determinant modular {req_l} of encryption key({det}) """
F"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
lowerCamelCase__: int =[char for char in text.upper() if char in self.key_string]
lowerCamelCase__: List[str] =chars[-1]
while len(UpperCAmelCase_) % self.break_key != 0:
chars.append(UpperCAmelCase_)
return "".join(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
lowerCamelCase__: Any =self.process_text(text.upper())
lowerCamelCase__: List[str] =""
for i in range(0 , len(UpperCAmelCase_) - self.break_key + 1 , self.break_key):
lowerCamelCase__: Union[str, Any] =text[i : i + self.break_key]
lowerCamelCase__: Optional[int] =[self.replace_letters(UpperCAmelCase_) for char in batch]
lowerCamelCase__: Any =numpy.array([vec]).T
lowerCamelCase__: Union[str, Any] =self.modulus(self.encrypt_key.dot(UpperCAmelCase_)).T.tolist()[
0
]
lowerCamelCase__: Dict ="".join(
self.replace_digits(UpperCAmelCase_) for num in batch_encrypted)
encrypted += encrypted_batch
return encrypted
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->numpy.ndarray:
'''simple docstring'''
lowerCamelCase__: Tuple =round(numpy.linalg.det(self.encrypt_key))
if det < 0:
lowerCamelCase__: str =det % len(self.key_string)
lowerCamelCase__: Optional[int] =None
for i in range(len(self.key_string)):
if (det * i) % len(self.key_string) == 1:
lowerCamelCase__: Optional[int] =i
break
lowerCamelCase__: str =(
det_inv
* numpy.linalg.det(self.encrypt_key)
* numpy.linalg.inv(self.encrypt_key)
)
return self.to_int(self.modulus(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str) ->str:
'''simple docstring'''
lowerCamelCase__: Any =self.make_decrypt_key()
lowerCamelCase__: Any =self.process_text(text.upper())
lowerCamelCase__: Dict =""
for i in range(0 , len(UpperCAmelCase_) - self.break_key + 1 , self.break_key):
lowerCamelCase__: List[str] =text[i : i + self.break_key]
lowerCamelCase__: List[str] =[self.replace_letters(UpperCAmelCase_) for char in batch]
lowerCamelCase__: Dict =numpy.array([vec]).T
lowerCamelCase__: Any =self.modulus(decrypt_key.dot(UpperCAmelCase_)).T.tolist()[0]
lowerCamelCase__: int ="".join(
self.replace_digits(UpperCAmelCase_) for num in batch_decrypted)
decrypted += decrypted_batch
return decrypted
def lowerCAmelCase_ ( ) -> None:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =int(input("Enter the order of the encryption key: " ) )
lowerCamelCase__: str =[]
print("Enter each row of the encryption key with space separated integers" )
for _ in range(__a ):
lowerCamelCase__: List[Any] =[int(__a ) for x in input().split()]
hill_matrix.append(__a )
lowerCamelCase__: Dict =HillCipher(numpy.array(__a ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
lowerCamelCase__: List[str] =input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
lowerCamelCase__: Union[str, Any] =input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(__a ) )
elif option == "2":
lowerCamelCase__: Optional[int] =input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(__a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 10
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__A = "."
if __name__ == "__main__":
__A = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__A = []
__A = []
with open(doctest_file_path) as fp:
for line in fp:
__A = line.strip()
__A = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__A = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 10
| 1
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCAmelCase : int =TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
lowerCAmelCase : Any =[]
lowerCAmelCase : str =[]
lowerCAmelCase : Optional[Any] ={'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
lowerCAmelCase : Tuple =[
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
'''emoji''': True,
},
}
]
lowerCAmelCase : Dict =0
for log in Path().glob('''*.log'''):
lowerCAmelCase : List[Any] =0
with open(log, '''r''') as f:
for line in f:
lowerCAmelCase : Optional[int] =json.loads(line)
if line.get('''nodeid''', '''''') != "":
lowerCAmelCase : int =line['''nodeid''']
if line.get('''duration''', None) is not None:
lowerCAmelCase : Optional[Any] =F'''{line['duration']:.4f}'''
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCAmelCase : Optional[int] =[]
log.unlink()
lowerCAmelCase : List[Any] =''''''
lowerCAmelCase : Optional[int] =[]
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCAmelCase : Tuple =[]
lowerCAmelCase : int ={}
for test in failed_tests:
lowerCAmelCase : Dict =test[0].split('''::''')
lowerCAmelCase : int =data[0].split('''/''')[-1]
if data[0] not in filesafailed:
lowerCAmelCase : List[Any] =[data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCAmelCase : Union[str, Any] =[test[0] for test in failed_table]
lowerCAmelCase : Any =list(set(files))
# Count number of instances in failed_tests
lowerCAmelCase : List[Any] =[]
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCAmelCase : Tuple =tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
lowerCAmelCase : Tuple ='''Too many failed tests, please see the full report in the Action results.'''
lowerCAmelCase : Any =len(err) + 10
lowerCAmelCase : Union[str, Any] =message[: 3_000 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
lowerCAmelCase : Tuple ='''No failed tests! 🤗'''
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
lowerCAmelCase : List[str] =WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
lowerCAmelCase : str ={
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
lowerCAmelCase : List[Any] ={
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
lowerCAmelCase : Any ={
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
lowerCAmelCase : Dict =client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
lowerCAmelCase : List[Any] =response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCAmelCase : Dict =''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCAmelCase : int =row[0]
else:
lowerCAmelCase : Optional[int] =''''''
lowerCAmelCase : Any ={
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 147
|
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase : Any ={
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class a_ :
def __init__( self : List[Any] , lowercase : dict[str, list[str]] , lowercase : str ):
"""simple docstring"""
lowercase_ :List[str] = graph
# mapping node to its parent in resulting breadth first tree
lowercase_ :dict[str, str | None] = {}
lowercase_ :Dict = source_vertex
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ :Union[str, Any] = {self.source_vertex}
lowercase_ :Tuple = None
lowercase_ :Dict = [self.source_vertex] # first in first out queue
while queue:
lowercase_ :int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowercase )
lowercase_ :Optional[Any] = vertex
queue.append(lowercase )
def lowercase__ ( self : Tuple , lowercase : str ):
"""simple docstring"""
if target_vertex == self.source_vertex:
return self.source_vertex
lowercase_ :List[str] = self.parent.get(lowercase )
if target_vertex_parent is None:
lowercase_ :Union[str, Any] = (
F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(lowercase )
return self.shortest_path(lowercase ) + F'->{target_vertex}'
if __name__ == "__main__":
lowerCAmelCase : Dict =Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
| 147
| 1
|
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = [1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
__lowercase = [2, 4, 6, 8, 1_0, 1_2]
__lowercase = 1_0_0
self.assertEqual(kp.calc_profit(lowercase__ ,lowercase__ ,lowercase__ ) ,2_1_0 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
self.assertRaisesRegex(lowercase__ ,'''max_weight must greater than zero.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
self.assertRaisesRegex(lowercase__ ,'''Weight can not be negative.''' )
def SCREAMING_SNAKE_CASE ( self : str ):
self.assertRaisesRegex(lowercase__ ,'''Profit can not be negative.''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
self.assertRaisesRegex(lowercase__ ,'''max_weight must greater than zero.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
self.assertRaisesRegex(
lowercase__ ,'''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 104
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Tuple=None , **__lowerCamelCase: Union[str, Any] ) -> Dict:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
__UpperCAmelCase : Union[str, Any] = model
__UpperCAmelCase : Optional[Any] = kwargs.get("model_save_dir" , __lowerCamelCase )
__UpperCAmelCase : str = kwargs.get("latest_model_name" , __lowerCamelCase )
def __call__( self: int , **__lowerCamelCase: Optional[Any] ) -> int:
__UpperCAmelCase : Optional[Any] = {k: np.array(__lowerCamelCase ) for k, v in kwargs.items()}
return self.model.run(__lowerCamelCase , __lowerCamelCase )
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Union[str, Path] , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Tuple=None ) -> List[str]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
__UpperCAmelCase : Any = "CPUExecutionProvider"
return ort.InferenceSession(__lowerCamelCase , providers=[provider] , sess_options=__lowerCamelCase )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Union[str, Path] , __lowerCamelCase: Optional[str] = None , **__lowerCamelCase: Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__UpperCAmelCase : str = self.model_save_dir.joinpath(self.latest_model_name )
__UpperCAmelCase : Any = Path(__lowerCamelCase ).joinpath(__lowerCamelCase )
try:
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__UpperCAmelCase : str = self.model_save_dir.joinpath(__lowerCamelCase )
if src_path.exists():
__UpperCAmelCase : List[str] = Path(__lowerCamelCase ).joinpath(__lowerCamelCase )
try:
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
except shutil.SameFileError:
pass
def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: Any , ) -> List[Any]:
if os.path.isfile(__lowerCamelCase ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
# saving model weights/files
self._save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Optional[Any] , __lowerCamelCase: Union[str, Path] , __lowerCamelCase: Optional[Union[bool, str, None]] = None , __lowerCamelCase: Optional[Union[str, None]] = None , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional["ort.SessionOptions"] = None , **__lowerCamelCase: Union[str, Any] , ) -> Optional[Any]:
__UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__lowerCamelCase ):
__UpperCAmelCase : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(__lowerCamelCase , __lowerCamelCase ) , provider=__lowerCamelCase , sess_options=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = Path(__lowerCamelCase )
# load model from hub
else:
# download model
__UpperCAmelCase : Optional[Any] = hf_hub_download(
repo_id=__lowerCamelCase , filename=__lowerCamelCase , use_auth_token=__lowerCamelCase , revision=__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , )
__UpperCAmelCase : Any = Path(__lowerCamelCase ).parent
__UpperCAmelCase : List[Any] = Path(__lowerCamelCase ).name
__UpperCAmelCase : Dict = OnnxRuntimeModel.load_model(__lowerCamelCase , provider=__lowerCamelCase , sess_options=__lowerCamelCase )
return cls(model=__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: Union[str, Path] , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , **__lowerCamelCase: Tuple , ) -> Optional[Any]:
__UpperCAmelCase : int = None
if len(str(__lowerCamelCase ).split("@" ) ) == 2:
__UpperCAmelCase , __UpperCAmelCase : Any = model_id.split("@" )
return cls._from_pretrained(
model_id=__lowerCamelCase , revision=__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , use_auth_token=__lowerCamelCase , **__lowerCamelCase , )
| 157
| 0
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __a ( unittest.TestCase ):
@slow
def A ( self : Any ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(UpperCAmelCase ):
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def A ( self : Dict ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(UpperCAmelCase ):
lowerCAmelCase_ : List[Any] = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[str] = FlaxAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def A ( self : Optional[int] ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : str = FlaxBertModel.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Dict = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase : List[str] ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
@slow
def A ( self : Tuple ):
for model_name in ["roberta-base", "roberta-large"]:
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = FlaxRobertaModel.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCAmelCase : Dict ):
return model(**UpperCAmelCase )
eval(**UpperCAmelCase ).block_until_ready()
def A ( self : Tuple ):
with self.assertRaisesRegex(
UpperCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ : Tuple = FlaxAutoModel.from_pretrained("""bert-base""" )
def A ( self : Union[str, Any] ):
with self.assertRaisesRegex(
UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ : Optional[int] = FlaxAutoModel.from_pretrained(UpperCAmelCase , revision="""aaaaaa""" )
def A ( self : List[Any] ):
with self.assertRaisesRegex(
UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
lowerCAmelCase_ : Any = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def A ( self : Optional[int] ):
with self.assertRaisesRegex(UpperCAmelCase , """Use `from_pt=True` to load this model""" ):
lowerCAmelCase_ : Optional[int] = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
| 351
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28
| 0
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : int = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('google/mt5-small' )
UpperCAmelCase : Dict = tokenizer('Hello there' , return_tensors='np' ).input_ids
UpperCAmelCase : Dict = tokenizer('Hi I am' , return_tensors='np' ).input_ids
UpperCAmelCase : Optional[int] = shift_tokens_right(a_ , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCAmelCase : List[Any] = model(a_ , decoder_input_ids=a_ ).logits
UpperCAmelCase : Optional[Any] = optax.softmax_cross_entropy(a_ , onehot(a_ , logits.shape[-1] ) ).mean()
UpperCAmelCase : Union[str, Any] = -(labels.shape[-1] * loss.item())
UpperCAmelCase : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 151
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : str = """▁"""
SCREAMING_SNAKE_CASE : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =BigBirdTokenizer
lowerCamelCase__ =BigBirdTokenizerFast
lowerCamelCase__ =True
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().setUp()
__snake_case : List[Any] = self.tokenizer_class(a_ , keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = '''<s>'''
__snake_case : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(a_ ) , 10_04 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case : str = self.get_tokenizer()
__snake_case : Dict = self.get_rust_tokenizer()
__snake_case : Dict = '''I was born in 92000, and this is falsé.'''
__snake_case : int = tokenizer.tokenize(a_ )
__snake_case : str = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
__snake_case : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
__snake_case : Tuple = rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
__snake_case : Optional[Any] = self.get_rust_tokenizer()
__snake_case : Optional[int] = tokenizer.encode(a_ )
__snake_case : Dict = rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = BigBirdTokenizer(a_ , keep_accents=a_ )
__snake_case : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ) , [2_85, 46, 10, 1_70, 3_82] , )
__snake_case : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = '''Hello World!'''
__snake_case : List[Any] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__snake_case : Optional[int] = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(a_ , self.big_tokenizer.encode(a_ ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
__snake_case : Tuple = ''' '''.join(a_ )
__snake_case : Tuple = self.big_tokenizer.encode_plus(a_ , return_tensors='''pt''' , return_token_type_ids=a_ )
__snake_case : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=a_ )
__snake_case : Optional[int] = BigBirdConfig(attention_type='''original_full''' )
__snake_case : str = BigBirdModel(a_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a_ )
model(**a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__snake_case : Any = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 102
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCAmelCase_ = {
'facebook/bart-base': 1_0_2_4,
'facebook/bart-large': 1_0_2_4,
'facebook/bart-large-mnli': 1_0_2_4,
'facebook/bart-large-cnn': 1_0_2_4,
'facebook/bart-large-xsum': 1_0_2_4,
'yjernite/bart_eli5': 1_0_2_4,
}
@lru_cache()
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase__ = bs[:]
UpperCAmelCase__ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase_ )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ = [chr(UpperCAmelCase_ ) for n in cs]
return dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ = set()
UpperCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ = char
return pairs
class lowerCAmelCase_ ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase_ : int = VOCAB_FILES_NAMES
lowerCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str="replace" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : List[str]="<unk>" , _UpperCAmelCase : Dict="<pad>" , _UpperCAmelCase : Optional[Any]="<mask>" , _UpperCAmelCase : List[Any]=False , **_UpperCAmelCase : Optional[int] , ):
"""simple docstring"""
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ = json.load(_UpperCAmelCase )
UpperCAmelCase__ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ = errors # how to handle errors in decoding
UpperCAmelCase__ = bytes_to_unicode()
UpperCAmelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase__ = {}
UpperCAmelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self : str , _UpperCAmelCase : Union[str, Any] ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ = tuple(_UpperCAmelCase )
UpperCAmelCase__ = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
UpperCAmelCase__ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ = bigram
UpperCAmelCase__ = []
UpperCAmelCase__ = 0
while i < len(_UpperCAmelCase ):
try:
UpperCAmelCase__ = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ = tuple(_UpperCAmelCase )
UpperCAmelCase__ = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
UpperCAmelCase__ = get_pairs(_UpperCAmelCase )
UpperCAmelCase__ = """ """.join(_UpperCAmelCase )
UpperCAmelCase__ = word
return word
def SCREAMING_SNAKE_CASE__ ( self : Tuple , _UpperCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = []
for token in re.findall(self.pat , _UpperCAmelCase ):
UpperCAmelCase__ = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(""" """ ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : Optional[int] ):
"""simple docstring"""
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : List[Any] ):
"""simple docstring"""
return self.decoder.get(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = """""".join(_UpperCAmelCase )
UpperCAmelCase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + """\n""" )
UpperCAmelCase__ = 0
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase__ = token_index
writer.write(""" """.join(_UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
UpperCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE__ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
UpperCAmelCase__ = """ """ + text
return (text, kwargs)
| 352
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : str = """dpt"""
def __init__( self : Optional[Any] , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=30_72 , _UpperCAmelCase : int="gelu" , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : List[Any]=1E-12 , _UpperCAmelCase : int=3_84 , _UpperCAmelCase : int=16 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[str]=[2, 5, 8, 11] , _UpperCAmelCase : Any="project" , _UpperCAmelCase : Optional[Any]=[4, 2, 1, 0.5] , _UpperCAmelCase : Tuple=[96, 1_92, 3_84, 7_68] , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Any=False , _UpperCAmelCase : str=True , _UpperCAmelCase : List[str]=0.4 , _UpperCAmelCase : Union[str, Any]=2_55 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Tuple=[1, 10_24, 24, 24] , _UpperCAmelCase : Union[str, Any]=[0, 1] , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : List[Any] , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
UpperCAmelCase__ = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
UpperCAmelCase__ = BitConfig(**_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
UpperCAmelCase__ = BitConfig(**_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCAmelCase__ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
UpperCAmelCase__ = backbone_featmap_shape
UpperCAmelCase__ = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = []
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = qkv_bias
UpperCAmelCase__ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
UpperCAmelCase__ = readout_type
UpperCAmelCase__ = reassemble_factors
UpperCAmelCase__ = neck_hidden_sizes
UpperCAmelCase__ = fusion_hidden_size
UpperCAmelCase__ = head_in_index
UpperCAmelCase__ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCAmelCase__ = use_auxiliary_head
UpperCAmelCase__ = auxiliary_loss_weight
UpperCAmelCase__ = semantic_loss_ignore_index
UpperCAmelCase__ = semantic_classifier_dropout
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase__ = self.backbone_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
| 61
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__snake_case = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: str = logging.get_logger(__name__)
def lowerCAmelCase_ ( A_ ,A_ ,A_):
return [
int(10_00 * (box[0] / width)),
int(10_00 * (box[1] / height)),
int(10_00 * (box[2] / width)),
int(10_00 * (box[3] / height)),
]
def lowerCAmelCase_ ( A_ ,A_ ,A_ = None):
UpperCamelCase__: List[str] = tesseract_config if tesseract_config is not None else ""
# apply OCR
UpperCamelCase__: Optional[int] = to_pil_image(A_)
UpperCamelCase__ , UpperCamelCase__: Tuple = pil_image.size
UpperCamelCase__: List[Any] = pytesseract.image_to_data(A_ ,lang=A_ ,output_type="dict" ,config=A_)
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: Dict = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
UpperCamelCase__: List[Any] = [idx for idx, word in enumerate(A_) if not word.strip()]
UpperCamelCase__: Union[str, Any] = [word for idx, word in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: Dict = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: List[Any] = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: Optional[int] = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
UpperCamelCase__: Optional[Any] = [coord for idx, coord in enumerate(A_) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase__: List[str] = []
for x, y, w, h in zip(A_ ,A_ ,A_ ,A_):
UpperCamelCase__: str = [x, y, x + w, y + h]
actual_boxes.append(A_)
# finally, normalize the bounding boxes
UpperCamelCase__: Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(A_ ,A_ ,A_))
assert len(A_) == len(A_), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a ( UpperCamelCase__):
"""simple docstring"""
UpperCamelCase__ = ["""pixel_values"""]
def __init__( self: int , __lowerCamelCase: bool = True , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = "" , **__lowerCamelCase: str , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
UpperCamelCase__: Optional[Any] = size if size is not None else {"height": 224, "width": 224}
UpperCamelCase__: Dict = get_size_dict(__lowerCamelCase )
UpperCamelCase__: Optional[Any] = do_resize
UpperCamelCase__: Optional[int] = size
UpperCamelCase__: int = resample
UpperCamelCase__: str = apply_ocr
UpperCamelCase__: List[Any] = ocr_lang
UpperCamelCase__: List[Any] = tesseract_config
def UpperCAmelCase_ ( self: int , __lowerCamelCase: np.ndarray , __lowerCamelCase: Dict[str, int] , __lowerCamelCase: PILImageResampling = PILImageResampling.BILINEAR , __lowerCamelCase: Optional[Union[str, ChannelDimension]] = None , **__lowerCamelCase: str , ):
'''simple docstring'''
UpperCamelCase__: int = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
UpperCamelCase__: int = (size["height"], size["width"])
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: ImageInput , __lowerCamelCase: bool = None , __lowerCamelCase: Dict[str, int] = None , __lowerCamelCase: PILImageResampling = None , __lowerCamelCase: bool = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[Union[str, TensorType]] = None , __lowerCamelCase: ChannelDimension = ChannelDimension.FIRST , **__lowerCamelCase: str , ):
'''simple docstring'''
UpperCamelCase__: str = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__: Any = size if size is not None else self.size
UpperCamelCase__: Union[str, Any] = get_size_dict(__lowerCamelCase )
UpperCamelCase__: Tuple = resample if resample is not None else self.resample
UpperCamelCase__: int = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase__: Any = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase__: Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase__: Any = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__: Union[str, Any] = [to_numpy_array(__lowerCamelCase ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
UpperCamelCase__: str = []
UpperCamelCase__: Optional[Any] = []
for image in images:
UpperCamelCase__ , UpperCamelCase__: Any = apply_tesseract(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
words_batch.append(__lowerCamelCase )
boxes_batch.append(__lowerCamelCase )
if do_resize:
UpperCamelCase__: List[Any] = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
UpperCamelCase__: List[str] = [flip_channel_order(__lowerCamelCase ) for image in images]
UpperCamelCase__: Optional[Any] = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
UpperCamelCase__: int = BatchFeature(data={"pixel_values": images} , tensor_type=__lowerCamelCase )
if apply_ocr:
UpperCamelCase__: Dict = words_batch
UpperCamelCase__: Tuple = boxes_batch
return data
| 149
| 0
|
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
_validate_point(lowerCAmelCase__ )
_validate_point(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
def lowerCamelCase ( _UpperCamelCase : list[float] ) -> None:
'''simple docstring'''
if point:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for item in point:
if not isinstance(lowerCAmelCase__ , (int, float) ):
__UpperCAmelCase : str = (
"""Expected a list of numbers as input, found """
f'''{type(lowerCAmelCase__ ).__name__}'''
)
raise TypeError(lowerCAmelCase__ )
else:
__UpperCAmelCase : Optional[int] = f'''Expected a list of numbers as input, found {type(lowerCAmelCase__ ).__name__}'''
raise TypeError(lowerCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
_validate_point(lowerCAmelCase__ )
_validate_point(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 320
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
SCREAMING_SNAKE_CASE__ : List[str] = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> str:
lowerCamelCase : Tuple = None
lowerCamelCase : str = os.path.abspath(os.path.join("examples" , "by_feature" ) )
lowerCamelCase : str = os.path.abspath("examples" )
for item in os.listdir(UpperCamelCase__ ):
if item not in EXCLUDE_EXAMPLES:
lowerCamelCase : List[str] = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if os.path.isfile(UpperCamelCase__ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCamelCase__ , feature_script=UpperCamelCase__ , tested_section="main()" if parser_only else "training_function()" , ):
lowerCamelCase : Any = compare_against_test(
os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Dict = "\n".join(UpperCamelCase__ )
if special_strings is not None:
for string in special_strings:
lowerCamelCase : Tuple = diff.replace(UpperCamelCase__ , "" )
self.assertEqual(UpperCamelCase__ , "" )
def _lowercase ( self ) -> List[Any]:
self.one_complete_example("complete_nlp_example.py" , UpperCamelCase__ )
self.one_complete_example("complete_nlp_example.py" , UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
lowerCamelCase : Tuple = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.one_complete_example("complete_cv_example.py" , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = False
@classmethod
def _lowercase ( cls ) -> str:
super().setUpClass()
lowerCamelCase : str = tempfile.mkdtemp()
lowerCamelCase : Tuple = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
lowerCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _lowercase ( cls ) -> List[Any]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Optional[int] = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
lowerCamelCase : Optional[Any] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def _lowercase ( self ) -> Any:
lowerCamelCase : Optional[int] = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
lowerCamelCase : List[Any] = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__ )
self.assertNotIn("epoch 0:" , UpperCamelCase__ )
self.assertIn("epoch 1:" , UpperCamelCase__ )
def _lowercase ( self ) -> str:
lowerCamelCase : int = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
lowerCamelCase : Optional[int] = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__ )
if torch.cuda.is_available():
lowerCamelCase : Optional[Any] = torch.cuda.device_count()
else:
lowerCamelCase : str = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , UpperCamelCase__ )
self.assertIn("epoch 1:" , UpperCamelCase__ )
else:
self.assertIn("epoch 0:" , UpperCamelCase__ )
self.assertIn("epoch 1:" , UpperCamelCase__ )
@slow
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Union[str, Any] = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
lowerCamelCase : Tuple = run_command(self._launch_args + testargs , return_stdout=UpperCamelCase__ )
lowerCamelCase : Optional[int] = re.findall("({.+})" , UpperCamelCase__ )
lowerCamelCase : Any = [r for r in results if "accuracy" in r][-1]
lowerCamelCase : List[str] = ast.literal_eval(UpperCamelCase__ )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def _lowercase ( self ) -> str:
lowerCamelCase : str = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _lowercase ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCamelCase : Optional[int] = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , "tracking" ) ) )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : Union[str, Any] = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def _lowercase ( self ) -> Any:
lowerCamelCase : Tuple = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 48
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[10, 20, 30, 40] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = parent
_SCREAMING_SNAKE_CASE : Dict = batch_size
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : Any = num_channels
_SCREAMING_SNAKE_CASE : Optional[int] = embeddings_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_sizes
_SCREAMING_SNAKE_CASE : str = depths
_SCREAMING_SNAKE_CASE : str = is_training
_SCREAMING_SNAKE_CASE : Any = use_labels
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : int = num_labels
_SCREAMING_SNAKE_CASE : str = scope
_SCREAMING_SNAKE_CASE : Any = len(__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values
def UpperCAmelCase_ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = FlaxRegNetModel(config=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = model(__snake_case )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.num_labels
_SCREAMING_SNAKE_CASE : List[Any] = FlaxRegNetForImageClassification(config=__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowercase__ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A_ : Union[str, Any] = False
A_ : List[str] = False
A_ : str = False
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = FlaxRegNetModelTester(self )
_SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def UpperCAmelCase_ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
return
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def UpperCAmelCase_ ( self ):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = model_class(__snake_case )
_SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_SCREAMING_SNAKE_CASE : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : str = model_class(__snake_case )
@jax.jit
def model_jitted(__snake_case , **__snake_case ):
return model(pixel_values=__snake_case , **__snake_case )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE : Any = model_jitted(**__snake_case ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Optional[int] = model_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
_SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
_SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
_SCREAMING_SNAKE_CASE : Tuple = image_processor(images=__snake_case , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE : str = model(**__snake_case )
# verify the logits
_SCREAMING_SNAKE_CASE : Tuple = (1, 1000)
self.assertEqual(outputs.logits.shape , __snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
| 200
| 0
|
import os
from pathlib import Path
def lowerCAmelCase_ ( ) -> List[str]:
from torch.utils.cpp_extension import load
UpperCamelCase__ : Optional[int] = Path(__UpperCAmelCase ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
UpperCamelCase__ : Union[str, Any] = [
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , __UpperCAmelCase , with_cuda=__UpperCAmelCase , extra_include_paths=[str(__UpperCAmelCase )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 247
|
import random
def lowerCAmelCase_ ( __UpperCAmelCase: list , __UpperCAmelCase: Optional[Any] ) -> tuple:
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : int = [], [], []
for element in data:
if element < pivot:
less.append(__UpperCAmelCase )
elif element > pivot:
greater.append(__UpperCAmelCase )
else:
equal.append(__UpperCAmelCase )
return less, equal, greater
def lowerCAmelCase_ ( __UpperCAmelCase: list , __UpperCAmelCase: int ) -> List[str]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(__UpperCAmelCase ) or index < 0:
return None
UpperCamelCase__ : Tuple = items[random.randint(0 , len(__UpperCAmelCase ) - 1 )]
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = _partition(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase__ : Union[str, Any] = len(__UpperCAmelCase )
UpperCamelCase__ : List[str] = len(__UpperCAmelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__UpperCAmelCase , __UpperCAmelCase )
# must be in larger
else:
return quick_select(__UpperCAmelCase , index - (m + count) )
| 247
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> Tuple:
lowerCAmelCase = path_or_paths
lowerCAmelCase = split if split or isinstance(lowercase , lowercase ) else """train"""
lowerCAmelCase = features
lowerCAmelCase = cache_dir
lowerCAmelCase = keep_in_memory
lowerCAmelCase = streaming
lowerCAmelCase = num_proc
lowerCAmelCase = kwargs
@abstractmethod
def _snake_case ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , **lowercase , ) -> str:
lowerCAmelCase = features
lowerCAmelCase = cache_dir
lowerCAmelCase = keep_in_memory
lowerCAmelCase = streaming
lowerCAmelCase = num_proc
lowerCAmelCase = kwargs
@abstractmethod
def _snake_case ( self ) -> Union[Dataset, IterableDataset]:
pass
| 46
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
SCREAMING_SNAKE_CASE__ = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None:
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase = unk_token if pad_token is None else pad_token
lowerCAmelCase = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
# Used for whitespace normalization in input texts
# fmt : off
lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase = re.compile(
f'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' )
def __getstate__( self ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , lowercase ) -> str:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _snake_case ( self ) -> int:
return len(self.sp_model )
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = self.non_printing_characters_re.sub("""""" , lowercase )
# Normalize whitespaces
lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase = unicodedata.normalize("""NFC""" , lowercase )
return text
def _snake_case ( self , lowercase , **lowercase ) -> List[str]:
lowerCAmelCase = self.preprocess_text(lowercase )
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> int:
return self.sp_model.PieceToId(lowercase )
def _snake_case ( self , lowercase ) -> str:
return self.sp_model.IdToPiece(lowercase )
@staticmethod
def _snake_case ( lowercase ) -> str:
return out_string
def _snake_case ( self , lowercase ) -> str:
lowerCAmelCase = []
lowerCAmelCase = """"""
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(lowercase )
lowerCAmelCase = False
out_string += self.sp_model.decode(lowercase )
return out_string
def _snake_case ( self ) -> Dict[str, int]:
lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def _snake_case ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(lowercase , lowercase ):
lowerCAmelCase = self.preprocess_text(lowercase )
lowerCAmelCase = self.sp_model.encode(lowercase )
else:
lowerCAmelCase = [self.preprocess_text(lowercase ) for t in text]
lowerCAmelCase = self.sp_model.encode(lowercase )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase = torch.tensor(lowercase )
return token_ids
def _snake_case ( self , lowercase ) -> str:
return self.sp_model.decode(lowercase )
def _snake_case ( self , lowercase ) -> List[int]:
lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
lowerCAmelCase = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowercase ) + f'{self.bos_token}Bot:'
)
return self.encode(text=lowercase )
| 46
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__magic_name__ = 50003
__magic_name__ = 50002
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = PLBartTokenizer
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = PLBartTokenizer(_snake_case , language_codes='''base''' , keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = PLBartTokenizer(_snake_case , language_codes='''base''' , keep_accents=_snake_case )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = [tokenizer.convert_ids_to_tokens(_snake_case ) for x in range(end - 4 , _snake_case )]
self.assertListEqual(_snake_case , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
UpperCAmelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
UpperCAmelCase = tokenizer(_snake_case ).input_ids
self.assertEqual(
tokenizer.decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) , _snake_case , )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = PLBartTokenizer(_snake_case , language_codes='''multi''' , keep_accents=_snake_case )
UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = [tokenizer.convert_ids_to_tokens(_snake_case ) for x in range(end - 7 , _snake_case )]
self.assertListEqual(
_snake_case , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
UpperCAmelCase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
UpperCAmelCase = tokenizer(_snake_case ).input_ids
self.assertEqual(
tokenizer.decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) , _snake_case , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """uclanlp/plbart-python-en_XX"""
__SCREAMING_SNAKE_CASE = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
__SCREAMING_SNAKE_CASE = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
__SCREAMING_SNAKE_CASE = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def snake_case_ ( cls ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
UpperCAmelCase = 1
return cls
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0003 )
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids )
UpperCAmelCase = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertNotIn(self.tokenizer.eos_token , _snake_case )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , _snake_case )
UpperCAmelCase = 10
UpperCAmelCase = self.tokenizer(_snake_case , max_length=_snake_case , truncation=_snake_case ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _snake_case )
self.assertEqual(len(_snake_case ) , _snake_case )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0004, 5_0001] )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_snake_case )
UpperCAmelCase = PLBartTokenizer.from_pretrained(_snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _snake_case )
@require_torch
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''' )
UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _snake_case )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(self.src_text , padding=_snake_case , truncation=_snake_case , max_length=3 , return_tensors='''pt''' )
UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=10 , return_tensors='''pt''' )
UpperCAmelCase = targets['''input_ids''']
UpperCAmelCase = shift_tokens_right(_snake_case , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(_snake_case ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 5_0003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0001,
} , )
| 152
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def _lowerCAmelCase ( A__: Path , A__: list ):
'''simple docstring'''
UpperCAmelCase = '''\n'''.join(A__ )
Path(A__ ).open('''w''' ).writelines(A__ )
__magic_name__ = "patrickvonplaten/t5-tiny-random"
__magic_name__ = "sshleifer/bart-tiny-random"
__magic_name__ = "sshleifer/tiny-mbart"
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase ( A__ ):
'''simple docstring'''
def snake_case_ ( self , _snake_case ) -> int:
"""simple docstring"""
UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
UpperCAmelCase = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(_snake_case , _snake_case )
UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase = f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(_snake_case , '''argv''' , _snake_case ):
run_generate()
assert Path(_snake_case ).exists()
# os.remove(Path(output_file_name))
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
self.run_eval_tester(_snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def snake_case_ ( self , _snake_case ) -> Any:
"""simple docstring"""
self.run_eval_tester(_snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def snake_case_ ( self , _snake_case ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
UpperCAmelCase = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
UpperCAmelCase = str(tmp_dir / '''scores.json''' )
UpperCAmelCase = str(tmp_dir / '''val.target''' )
_dump_articles(_snake_case , text['''en'''] )
_dump_articles(_snake_case , text['''de'''] )
UpperCAmelCase = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase = f"""
run_eval_search.py
{model}
{str(_snake_case )}
{str(_snake_case )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
with patch.object(_snake_case , '''argv''' , _snake_case ):
with CaptureStdout() as cs:
run_search()
UpperCAmelCase = [''' num_beams | length_penalty''', model, '''Best score args''']
UpperCAmelCase = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''' )
else:
expected_strings.extend(_snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_snake_case ).exists()
os.remove(Path(_snake_case ) )
| 152
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_lowerCAmelCase = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_lowerCAmelCase = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_lowerCAmelCase = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) ,reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> List[Any]:
if return_pvalue:
lowerCAmelCase__ : Optional[int] = pearsonr(__UpperCAmelCase ,__UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__UpperCAmelCase ,__UpperCAmelCase )[0] )}
| 37
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowerCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,**__UpperCAmelCase ) -> Tuple:
super().__init__(**__UpperCAmelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[Any] = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ : int = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowerCAmelCase__ : Optional[int] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase="This is a photo of {}." ) -> int:
lowerCAmelCase__ : str = load_image(__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.image_processor(images=[image] ,return_tensors=self.framework )
lowerCAmelCase__ : List[Any] = candidate_labels
lowerCAmelCase__ : List[str] = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels]
lowerCAmelCase__ : Optional[Any] = self.tokenizer(__UpperCAmelCase ,return_tensors=self.framework ,padding=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = [text_inputs]
return inputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]:
lowerCAmelCase__ : Tuple = model_inputs.pop("""candidate_labels""" )
lowerCAmelCase__ : Union[str, Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,__UpperCAmelCase ):
lowerCAmelCase__ : int = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ : Dict = text_inputs[0][0]
lowerCAmelCase__ : Any = self.model(**__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Union[str, Any] = model_outputs.pop("""candidate_labels""" )
lowerCAmelCase__ : List[str] = model_outputs["""logits"""][0]
if self.framework == "pt":
lowerCAmelCase__ : List[str] = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase__ : Optional[Any] = probs.tolist()
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = [scores]
elif self.framework == "tf":
lowerCAmelCase__ : Any = stable_softmax(__UpperCAmelCase ,axis=-1 )
lowerCAmelCase__ : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
lowerCAmelCase__ : Tuple = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__UpperCAmelCase ,__UpperCAmelCase ) ,key=lambda __UpperCAmelCase : -x[0] )
]
return result
| 37
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase_ = {
'distilbert-base-uncased': 5_12,
'distilbert-base-uncased-distilled-squad': 5_12,
'distilbert-base-cased': 5_12,
'distilbert-base-cased-distilled-squad': 5_12,
'distilbert-base-german-cased': 5_12,
'distilbert-base-multilingual-cased': 5_12,
}
lowerCAmelCase_ = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = DistilBertTokenizer
def __init__( self : Tuple , _A : str=None , _A : List[str]=None , _A : Any=True , _A : str="[UNK]" , _A : List[str]="[SEP]" , _A : str="[PAD]" , _A : Optional[Any]="[CLS]" , _A : Tuple="[MASK]" , _A : str=True , _A : Tuple=None , **_A : Dict , ) -> Tuple:
"""simple docstring"""
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
lowercase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _A ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _A ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars
):
lowercase : Tuple = getattr(_A , normalizer_state.pop('''type''' ) )
lowercase : Union[str, Any] = do_lower_case
lowercase : str = strip_accents
lowercase : Optional[Any] = tokenize_chinese_chars
lowercase : List[str] = normalizer_class(**_A )
lowercase : int = do_lower_case
def __a ( self : List[Any] , _A : Dict , _A : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : int = [self.sep_token_id]
lowercase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase : List[str] = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 369
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _A ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : str , _A : float , _A : Callable , _A : int , _A : float = 1.0 , _A : str = None , ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase : List[str] = initial_learning_rate
lowercase : List[str] = warmup_steps
lowercase : Tuple = power
lowercase : Any = decay_schedule_fn
lowercase : Union[str, Any] = name
def __call__( self : str , _A : Any ) -> Optional[int]:
"""simple docstring"""
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowercase : List[Any] = tf.cast(_A , tf.floataa )
lowercase : Union[str, Any] = tf.cast(self.warmup_steps , tf.floataa )
lowercase : List[str] = global_step_float / warmup_steps_float
lowercase : int = self.initial_learning_rate * tf.math.pow(_A , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_A , )
def __a ( self : Dict ) -> List[str]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 0.0 , __magic_name__ = 0.9 , __magic_name__ = 0.9_9_9 , __magic_name__ = 1e-8 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = 0.0 , __magic_name__ = 1.0 , __magic_name__ = None , ) -> int:
'''simple docstring'''
lowercase : List[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__magic_name__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__magic_name__ , )
if num_warmup_steps:
lowercase : Optional[int] = WarmUp(
initial_learning_rate=__magic_name__ , decay_schedule_fn=__magic_name__ , warmup_steps=__magic_name__ , )
if weight_decay_rate > 0.0:
lowercase : Optional[Any] = AdamWeightDecay(
learning_rate=__magic_name__ , weight_decay_rate=__magic_name__ , beta_a=__magic_name__ , beta_a=__magic_name__ , epsilon=__magic_name__ , clipnorm=__magic_name__ , global_clipnorm=__magic_name__ , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=__magic_name__ , )
else:
lowercase : str = tf.keras.optimizers.Adam(
learning_rate=__magic_name__ , beta_a=__magic_name__ , beta_a=__magic_name__ , epsilon=__magic_name__ , clipnorm=__magic_name__ , global_clipnorm=__magic_name__ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class _A ( _lowerCamelCase ):
def __init__( self : Optional[Any] , _A : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , _A : float = 0.9 , _A : float = 0.999 , _A : float = 1E-7 , _A : bool = False , _A : float = 0.0 , _A : Optional[List[str]] = None , _A : Optional[List[str]] = None , _A : str = "AdamWeightDecay" , **_A : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_A , _A , _A , _A , _A , _A , **_A )
lowercase : Tuple = weight_decay_rate
lowercase : List[str] = include_in_weight_decay
lowercase : Optional[Any] = exclude_from_weight_decay
@classmethod
def __a ( cls : Tuple , _A : Tuple ) -> List[str]:
"""simple docstring"""
lowercase : Optional[int] = {'''WarmUp''': WarmUp}
return super(_A , cls ).from_config(_A , custom_objects=_A )
def __a ( self : Dict , _A : Tuple , _A : Dict , _A : Tuple ) -> Tuple:
"""simple docstring"""
super(_A , self )._prepare_local(_A , _A , _A )
lowercase : List[Any] = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def __a ( self : Tuple , _A : Optional[int] , _A : Union[str, Any] , _A : List[Any] ) -> Any:
"""simple docstring"""
lowercase : str = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def __a ( self : Union[str, Any] , _A : Any , _A : Optional[Any]=None , **_A : Tuple ) -> Any:
"""simple docstring"""
lowercase , lowercase : Tuple = list(zip(*_A ) )
return super(_A , self ).apply_gradients(zip(_A , _A ) , name=_A , **_A )
def __a ( self : List[Any] , _A : Optional[Any] , _A : str , _A : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowercase : Any = apply_state or {}
lowercase : str = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowercase : List[Any] = self._fallback_apply_state(_A , _A )
lowercase : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __a ( self : Tuple , _A : Union[str, Any] , _A : Tuple , _A : str=None ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase : List[str] = self._get_lr(var.device , var.dtype.base_dtype , _A )
lowercase : Optional[Any] = self._decay_weights_op(_A , _A , _A )
with tf.control_dependencies([decay] ):
return super(_A , self )._resource_apply_dense(_A , _A , **_A )
def __a ( self : Optional[int] , _A : List[Any] , _A : Dict , _A : Union[str, Any] , _A : str=None ) -> Optional[int]:
"""simple docstring"""
lowercase , lowercase : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , _A )
lowercase : str = self._decay_weights_op(_A , _A , _A )
with tf.control_dependencies([decay] ):
return super(_A , self )._resource_apply_sparse(_A , _A , _A , **_A )
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Optional[Any] = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def __a ( self : str , _A : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_A , _A ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_A , _A ) is not None:
return False
return True
class _A ( _lowerCamelCase ):
def __init__( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Optional[Any] = []
lowercase : str = None
@property
def __a ( self : Any ) -> int:
"""simple docstring"""
if self._accum_steps is None:
lowercase : Optional[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_A , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : str , _A : int ) -> str:
"""simple docstring"""
if not self._gradients:
lowercase : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_A ) , trainable=_A , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_A ) != len(self._gradients ):
raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(_A )}""" )
for accum_gradient, gradient in zip(self._gradients , _A ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_A )
self._accum_steps.assign_add(1 )
def __a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_A ) )
| 116
| 0
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _A ( lowercase__ , lowercase__ ):
lowercase__ = f'''{sampling_rate}'''
lowercase__ = '1'
lowercase__ = 'f32le'
lowercase__ = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase__ = ffmpeg_process.communicate(_SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
lowercase__ = output_stream[0]
lowercase__ = np.frombuffer(_SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _A ( lowercase__ , lowercase__ , lowercase__ = "f32le" , ):
lowercase__ = f'''{sampling_rate}'''
lowercase__ = '1'
if format_for_conversion == "s16le":
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase__ = platform.system()
if system == "Linux":
lowercase__ = 'alsa'
lowercase__ = 'default'
elif system == "Darwin":
lowercase__ = 'avfoundation'
lowercase__ = ':0'
elif system == "Windows":
lowercase__ = 'dshow'
lowercase__ = 'default'
lowercase__ = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase__ = _ffmpeg_stream(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def _A ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = "f32le" , ):
if stream_chunk_s is not None:
lowercase__ = stream_chunk_s
else:
lowercase__ = chunk_length_s
lowercase__ = ffmpeg_microphone(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , format_for_conversion=_SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
lowercase__ = np.intaa
lowercase__ = 2
elif format_for_conversion == "f32le":
lowercase__ = np.floataa
lowercase__ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase__ = chunk_length_s / 6
lowercase__ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ):
lowercase__ = [stride_length_s, stride_length_s]
lowercase__ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase__ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase__ = datetime.datetime.now()
lowercase__ = datetime.timedelta(seconds=_SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=_SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
lowercase__ = np.frombuffer(item["""raw"""] , dtype=_SCREAMING_SNAKE_CASE )
lowercase__ = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowercase__ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = False ):
lowercase__ = b''
lowercase__ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowercase__ = 0
for raw in iterator:
acc += raw
if stream and len(_SCREAMING_SNAKE_CASE ) < chunk_len:
lowercase__ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
lowercase__ = (_stride_left, stride_right)
lowercase__ = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowercase__ = False
yield item
lowercase__ = stride_left
lowercase__ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_SCREAMING_SNAKE_CASE ) > stride_left:
lowercase__ = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowercase__ = False
yield item
def _A ( lowercase__ , lowercase__ ):
lowercase__ = 2**24 # 16Mo
try:
with subprocess.Popen(_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=_SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
lowercase__ = ffmpeg_process.stdout.read(_SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 164
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase__ = random.Random()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
if rng is None:
a__: Any = global_rng
a__: int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=4_00 , lowercase=20_00 , lowercase=1 , lowercase=0.0 , lowercase=1_60_00 , lowercase=True , lowercase=True , ) -> Union[str, Any]:
'''simple docstring'''
a__: Tuple = parent
a__: Optional[int] = batch_size
a__: Optional[Any] = min_seq_length
a__: Optional[int] = max_seq_length
a__: Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__: Dict = feature_size
a__: Any = padding_value
a__: Optional[Any] = sampling_rate
a__: Optional[Any] = return_attention_mask
a__: str = do_normalize
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self , lowercase=False , lowercase=False) -> Tuple:
'''simple docstring'''
def _flatten(lowercase):
return list(itertools.chain(*lowercase))
if equal_length:
a__: Dict = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
a__: List[Any] = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
a__: str = [np.asarray(lowercase) for x in speech_inputs]
return speech_inputs
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = WavaVecaFeatureExtractor
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = WavaVecaFeatureExtractionTester(self)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowercase , axis=0) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(lowercase , axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
a__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: List[str] = [np.asarray(lowercase) for speech_input in speech_inputs]
# Test not batched input
a__: Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='np').input_values
a__: Dict = feat_extract(np_speech_inputs[0] , return_tensors='np').input_values
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test batched
a__: Dict = feat_extract(lowercase , return_tensors='np').input_values
a__: int = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
# Test 2-D numpy arrays are batched.
a__: int = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
a__: Union[str, Any] = np.asarray(lowercase)
a__: int = feat_extract(lowercase , return_tensors='np').input_values
a__: Any = feat_extract(lowercase , return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(lowercase , lowercase):
self.assertTrue(np.allclose(lowercase , lowercase , atol=1e-3))
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: List[Any] = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Optional[int] = ['longest', 'max_length', 'do_not_pad']
a__: List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: Dict = feat_extract(lowercase , padding=lowercase , max_length=lowercase , return_tensors='np')
a__: Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self.assertTrue(input_values[0][8_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self.assertTrue(input_values[0][10_00:].sum() < 1e-6)
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Optional[int] = range(8_00 , 14_00 , 2_00)
a__: List[str] = [floats_list((1, x))[0] for x in lengths]
a__: Tuple = ['longest', 'max_length', 'do_not_pad']
a__: Dict = [None, 16_00, None]
for max_length, padding in zip(lowercase , lowercase):
a__: int = feat_extract(lowercase , max_length=lowercase , padding=lowercase)
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00])
self._check_zero_mean_unit_variance(input_values[1][:10_00])
self._check_zero_mean_unit_variance(input_values[2][:12_00])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Any = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Dict = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='max_length' , return_tensors='np')
a__: int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1])
self._check_zero_mean_unit_variance(input_values[2])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: int = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: str = feat_extract(
lowercase , truncation=lowercase , max_length=10_00 , padding='longest' , return_tensors='np')
a__: Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00))
a__: Dict = [floats_list((1, x))[0] for x in range(8_00 , 14_00 , 2_00)]
a__: Tuple = feat_extract(
lowercase , truncation=lowercase , max_length=20_00 , padding='longest' , return_tensors='np')
a__: str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00])
self._check_zero_mean_unit_variance(input_values[1, :10_00])
self._check_zero_mean_unit_variance(input_values[2])
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00))
@require_torch
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
import torch
a__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
a__: Tuple = np.random.rand(1_00).astype(np.floataa)
a__: Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__: Any = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
a__: Optional[Any] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
@slow
@require_torch
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
a__: str = WavaVecaConfig.from_pretrained(lowercase)
a__: str = WavaVecaFeatureExtractor.from_pretrained(lowercase)
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer')
| 290
| 0
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_UpperCAmelCase = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
_UpperCAmelCase = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[int] ) -> List[str]:
__lowerCAmelCase : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
__lowerCAmelCase : str = state_dict.pop(SCREAMING_SNAKE_CASE )
# emb -> embedding
if name.startswith("""emb.""" ):
__lowerCAmelCase : Optional[Any] = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
__lowerCAmelCase : List[str] = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
__lowerCAmelCase : str = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , SCREAMING_SNAKE_CASE )
# ffn -> feed_forward
__lowerCAmelCase : Any = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , SCREAMING_SNAKE_CASE )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
__lowerCAmelCase : str = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
__lowerCAmelCase : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(""".time_mix_r""" ):
__lowerCAmelCase : Any = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
__lowerCAmelCase : str = """rwkv.""" + name
__lowerCAmelCase : int = weight
return state_dict
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Tuple , SCREAMING_SNAKE_CASE :Optional[Any]=None , SCREAMING_SNAKE_CASE :Optional[int]=None , SCREAMING_SNAKE_CASE :Tuple=False , SCREAMING_SNAKE_CASE :Dict=None ) -> Union[str, Any]:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
__lowerCAmelCase : Any = 50_277
__lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
__lowerCAmelCase : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
# 2. Build the config
__lowerCAmelCase : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowerCAmelCase : Union[str, Any] = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
__lowerCAmelCase : Dict = RwkvConfig(
vocab_size=SCREAMING_SNAKE_CASE , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(SCREAMING_SNAKE_CASE )
# 3. Download model file then convert state_dict
__lowerCAmelCase : Optional[Any] = hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.load(SCREAMING_SNAKE_CASE , map_location="""cpu""" )
__lowerCAmelCase : Dict = convert_state_dict(SCREAMING_SNAKE_CASE )
# 4. Split in shards and save
__lowerCAmelCase , __lowerCAmelCase : int = shard_checkpoint(SCREAMING_SNAKE_CASE )
for shard_file, shard in shards.items():
torch.save(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if index is not None:
__lowerCAmelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save the index as well
with open(SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase : Optional[int] = json.dumps(SCREAMING_SNAKE_CASE , indent=2 , sort_keys=SCREAMING_SNAKE_CASE ) + """\n"""
f.write(SCREAMING_SNAKE_CASE )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"""Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
__lowerCAmelCase : int = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowerCAmelCase : List[Any] = torch.load(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
__lowerCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE )
model.push_to_hub(SCREAMING_SNAKE_CASE , max_shard_size="""2GB""" )
tokenizer.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
_UpperCAmelCase = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 232
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_UpperCAmelCase = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_UpperCAmelCase = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_UpperCAmelCase = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def UpperCAmelCase__ ( self : Dict , _snake_case : List[Any] , _snake_case : int , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , )->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = len(references[0] )
if any(len(_snake_case ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__lowerCAmelCase : str = [[refs[i] for refs in references] for i in range(_snake_case )]
__lowerCAmelCase : Tuple = TER(
normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , )
__lowerCAmelCase : List[Any] = sb_ter.corpus_score(_snake_case , _snake_case )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 232
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Any ='big_bird'
def __init__( self , SCREAMING_SNAKE_CASE_=5_0358 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-12 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=66 , SCREAMING_SNAKE_CASE_="block_sparse" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Union[str, Any]:
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , sep_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
UpperCamelCase :str = vocab_size
UpperCamelCase :Union[str, Any] = max_position_embeddings
UpperCamelCase :Any = hidden_size
UpperCamelCase :List[str] = num_hidden_layers
UpperCamelCase :Any = num_attention_heads
UpperCamelCase :Union[str, Any] = intermediate_size
UpperCamelCase :str = hidden_act
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :Tuple = attention_probs_dropout_prob
UpperCamelCase :Union[str, Any] = initializer_range
UpperCamelCase :Optional[int] = type_vocab_size
UpperCamelCase :Tuple = layer_norm_eps
UpperCamelCase :List[Any] = use_cache
UpperCamelCase :Optional[int] = rescale_embeddings
UpperCamelCase :Tuple = attention_type
UpperCamelCase :int = use_bias
UpperCamelCase :List[Any] = block_size
UpperCamelCase :Dict = num_random_blocks
UpperCamelCase :Any = classifier_dropout
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase :Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase :List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 259
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any="attention" ):
UpperCamelCase :str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
UpperCamelCase :Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCamelCase :Optional[int] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
UpperCamelCase :List[Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCamelCase :Union[str, Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
UpperCamelCase :Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCamelCase :str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
UpperCamelCase :str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=False ):
if split_mlp_wi:
UpperCamelCase :List[Any] = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
UpperCamelCase :int = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
UpperCamelCase :str = (wi_a, wi_a)
else:
UpperCamelCase :Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
UpperCamelCase :Optional[int] = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def _A ( SCREAMING_SNAKE_CASE__ : dict , *, SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : bool = False ):
UpperCamelCase :Tuple = traverse_util.flatten_dict(variables['''target'''] )
UpperCamelCase :List[Any] = {'''/'''.join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase :int = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase :int = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :str = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :Dict = k.T
UpperCamelCase :Optional[Any] = o.T
UpperCamelCase :int = q.T
UpperCamelCase :Any = v.T
# Block i, layer 1 (MLP).
UpperCamelCase :Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Any = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :List[Any] = wi[0].T
UpperCamelCase :Tuple = wi[1].T
else:
UpperCamelCase :Optional[Any] = wi.T
UpperCamelCase :Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :List[str] = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' ).T
UpperCamelCase :Optional[Any] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
UpperCamelCase :str = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''encoder''' ).T
UpperCamelCase :Any = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''self_attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :int = k.T
UpperCamelCase :Optional[int] = o.T
UpperCamelCase :Tuple = q.T
UpperCamelCase :List[str] = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase :str = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCamelCase :Tuple = layer_norm
UpperCamelCase :Optional[Any] = k.T
UpperCamelCase :List[str] = o.T
UpperCamelCase :List[str] = q.T
UpperCamelCase :str = v.T
# Block i, layer 2 (MLP).
UpperCamelCase :List[str] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Optional[int] = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :List[str] = wi[0].T
UpperCamelCase :str = wi[1].T
else:
UpperCamelCase :Dict = wi.T
UpperCamelCase :Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :Tuple = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' ).T
UpperCamelCase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T
return new
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : bool ):
UpperCamelCase :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCamelCase :Dict = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCamelCase :Dict = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCamelCase :List[Any] = state_dict['''shared.weight''']
return state_dict
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
UpperCamelCase :Dict = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = convert_tax_to_pytorch(
SCREAMING_SNAKE_CASE__ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE__ , scalable_attention=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = make_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ):
UpperCamelCase :Any = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCamelCase :List[str] = UMTaEncoderModel(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :Any = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE__ )
print('''Done''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 259
| 1
|
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
__UpperCAmelCase = {
'AI-Sweden/gpt-sw3-126m': 20_48,
'AI-Sweden/gpt-sw3-350m': 20_48,
'AI-Sweden/gpt-sw3-1.6b': 20_48,
'AI-Sweden/gpt-sw3-6.7b': 20_48,
'AI-Sweden/gpt-sw3-20b': 20_48,
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = VOCAB_FILES_NAMES
UpperCAmelCase_ :List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :str = ["input_ids", "attention_mask"]
def __init__( self , __A , __A=False , __A=False , __A=False , __A=None , __A=None , __A=None , __A=None , __A = None , **__A , ) -> None:
lowerCAmelCase_ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase_ :Any = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
lowerCAmelCase_ :str = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
lowerCAmelCase_ :Dict = """<|endoftext|>""" if eos_token is None else eos_token
lowerCAmelCase_ :Any = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
lowerCAmelCase_ :Dict = unk_token if pad_token is None else pad_token
lowerCAmelCase_ :List[Any] = eos_token if bos_token is None else bos_token
else:
lowerCAmelCase_ :Any = """<pad>""" if pad_token is None else pad_token
lowerCAmelCase_ :Any = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowerCAmelCase_ :List[Any] = do_lower_case
lowerCAmelCase_ :Dict = remove_space
lowerCAmelCase_ :Optional[int] = keep_accents
lowerCAmelCase_ :int = vocab_file
lowerCAmelCase_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
lowerCAmelCase_ :List[str] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
lowerCAmelCase_ :int = re.compile(
f"""[{"".join(map(__A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__( self ) -> Optional[Any]:
lowerCAmelCase_ :Dict = self.__dict__.copy()
lowerCAmelCase_ :str = None
return state
def __setstate__( self , __A ) -> List[Any]:
lowerCAmelCase_ :int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :Optional[Any] = {}
lowerCAmelCase_ :int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCAmelCase ( self ) -> int:
return len(self.sp_model )
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :Any = self.non_printing_characters_re.sub("""""" , __A )
# Normalize whitespaces
lowerCAmelCase_ :str = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
lowerCAmelCase_ :Union[str, Any] = unicodedata.normalize("""NFC""" , __A )
return text
def __lowerCAmelCase ( self , __A , **__A ) -> List[str]:
lowerCAmelCase_ :Dict = self.preprocess_text(__A )
return self.sp_model.encode(__A , out_type=__A )
def __lowerCAmelCase ( self , __A ) -> int:
return self.sp_model.PieceToId(__A )
def __lowerCAmelCase ( self , __A ) -> str:
return self.sp_model.IdToPiece(__A )
@staticmethod
def __lowerCAmelCase ( __A ) -> str:
return out_string
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :int = []
lowerCAmelCase_ :int = """"""
lowerCAmelCase_ :List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
lowerCAmelCase_ :Tuple = True
lowerCAmelCase_ :Optional[int] = []
else:
current_sub_tokens.append(__A )
lowerCAmelCase_ :str = False
out_string += self.sp_model.decode(__A )
return out_string
def __lowerCAmelCase ( self ) -> Dict[str, int]:
lowerCAmelCase_ :int = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :Optional[int] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
lowerCAmelCase_ :str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def __lowerCAmelCase ( self , __A , __A = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(__A , __A ):
lowerCAmelCase_ :Dict = self.preprocess_text(__A )
lowerCAmelCase_ :int = self.sp_model.encode(__A )
else:
lowerCAmelCase_ :List[str] = [self.preprocess_text(__A ) for t in text]
lowerCAmelCase_ :List[Any] = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
lowerCAmelCase_ :Dict = torch.tensor(__A )
return token_ids
def __lowerCAmelCase ( self , __A ) -> str:
return self.sp_model.decode(__A )
def __lowerCAmelCase ( self , __A ) -> List[int]:
lowerCAmelCase_ :str = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
lowerCAmelCase_ :Optional[Any] = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__A ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=__A )
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class A ( _a ):
UpperCamelCase_ : str ='''roberta-prelayernorm'''
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=7_6_8 , lowerCAmelCase=1_2 , lowerCAmelCase=1_2 , lowerCAmelCase=3_0_7_2 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= position_embedding_type
__lowercase= use_cache
__lowercase= classifier_dropout
class A ( _a ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 295
|
'''simple docstring'''
def __lowerCamelCase ( A__ = 50 ) -> int:
"""simple docstring"""
UpperCamelCase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28
| 0
|
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_UpperCamelCase ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 279
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] , __UpperCAmelCase: List[Any]=0.999 , __UpperCAmelCase: Tuple="cosine" , ) -> Optional[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase: List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase: List[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
UpperCamelCase__ : Dict = []
for i in range(__UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = i / num_diffusion_timesteps
UpperCamelCase__ : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class lowercase__ ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
a : Union[str, Any] = 2
@register_to_config
def __init__( self, __magic_name__ = 1000, __magic_name__ = 0.0_0085, __magic_name__ = 0.012, __magic_name__ = "linear", __magic_name__ = None, __magic_name__ = "epsilon", __magic_name__ = "linspace", __magic_name__ = 0, ) -> Tuple:
"""simple docstring"""
if trained_betas is not None:
UpperCamelCase__ : int = torch.tensor(__magic_name__, dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCamelCase__ : Dict = torch.linspace(__magic_name__, __magic_name__, __magic_name__, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCamelCase__ : List[str] = (
torch.linspace(beta_start**0.5, beta_end**0.5, __magic_name__, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCamelCase__ : str = betas_for_alpha_bar(__magic_name__ )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
UpperCamelCase__ : Optional[int] = 1.0 - self.betas
UpperCamelCase__ : List[Any] = torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=None ) -> str:
"""simple docstring"""
if schedule_timesteps is None:
UpperCamelCase__ : Dict = self.timesteps
UpperCamelCase__ : Tuple = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCamelCase__ : List[str] = 1 if len(__magic_name__ ) > 1 else 0
else:
UpperCamelCase__ : List[Any] = timestep.cpu().item() if torch.is_tensor(__magic_name__ ) else timestep
UpperCamelCase__ : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.index_for_timestep(__magic_name__ )
if self.state_in_first_order:
UpperCamelCase__ : str = self.sigmas[step_index]
else:
UpperCamelCase__ : Optional[int] = self.sigmas_interpol[step_index]
UpperCamelCase__ : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None, __magic_name__ = None, ) -> str:
"""simple docstring"""
UpperCamelCase__ : Dict = num_inference_steps
UpperCamelCase__ : Tuple = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCamelCase__ : Union[str, Any] = np.linspace(0, num_train_timesteps - 1, __magic_name__, dtype=__magic_name__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCamelCase__ : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ : List[str] = (np.arange(0, __magic_name__ ) * step_ratio).round()[::-1].copy().astype(__magic_name__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCamelCase__ : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCamelCase__ : List[str] = (np.arange(__magic_name__, 0, -step_ratio )).round().copy().astype(__magic_name__ )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
UpperCamelCase__ : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCamelCase__ : Optional[Any] = torch.from_numpy(np.log(__magic_name__ ) ).to(__magic_name__ )
UpperCamelCase__ : Any = np.interp(__magic_name__, np.arange(0, len(__magic_name__ ) ), __magic_name__ )
UpperCamelCase__ : Union[str, Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCamelCase__ : Any = torch.from_numpy(__magic_name__ ).to(device=__magic_name__ )
# interpolate sigmas
UpperCamelCase__ : int = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp()
UpperCamelCase__ : List[str] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
UpperCamelCase__ : str = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__magic_name__ ).startswith('''mps''' ):
# mps does not support float64
UpperCamelCase__ : Optional[Any] = torch.from_numpy(__magic_name__ ).to(__magic_name__, dtype=torch.floataa )
else:
UpperCamelCase__ : List[Any] = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
# interpolate timesteps
UpperCamelCase__ : str = self.sigma_to_t(__magic_name__ ).to(__magic_name__, dtype=timesteps.dtype )
UpperCamelCase__ : Dict = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten()
UpperCamelCase__ : Optional[int] = torch.cat([timesteps[:1], interleaved_timesteps] )
UpperCamelCase__ : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCamelCase__ : Dict = defaultdict(__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
# get log sigma
UpperCamelCase__ : Any = sigma.log()
# get distribution
UpperCamelCase__ : List[str] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
UpperCamelCase__ : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
UpperCamelCase__ : Optional[Any] = low_idx + 1
UpperCamelCase__ : str = self.log_sigmas[low_idx]
UpperCamelCase__ : int = self.log_sigmas[high_idx]
# interpolate sigmas
UpperCamelCase__ : List[Any] = (low - log_sigma) / (low - high)
UpperCamelCase__ : str = w.clamp(0, 1 )
# transform interpolation to time range
UpperCamelCase__ : Tuple = (1 - w) * low_idx + w * high_idx
UpperCamelCase__ : int = t.view(sigma.shape )
return t
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.sample is None
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ = True, ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.index_for_timestep(__magic_name__ )
# advance index counter by 1
UpperCamelCase__ : int = timestep.cpu().item() if torch.is_tensor(__magic_name__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCamelCase__ : Optional[Any] = self.sigmas[step_index]
UpperCamelCase__ : Union[str, Any] = self.sigmas_interpol[step_index + 1]
UpperCamelCase__ : List[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
UpperCamelCase__ : Tuple = self.sigmas[step_index - 1]
UpperCamelCase__ : Tuple = self.sigmas_interpol[step_index]
UpperCamelCase__ : Dict = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCamelCase__ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase__ : List[str] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCamelCase__ : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCamelCase__ : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCamelCase__ : List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCamelCase__ : List[str] = sigma_interpol - sigma_hat
# store for 2nd order step
UpperCamelCase__ : Dict = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
UpperCamelCase__ : List[str] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
UpperCamelCase__ : Union[str, Any] = sigma_next - sigma_hat
UpperCamelCase__ : Union[str, Any] = self.sample
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Optional[int] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, ) -> torch.FloatTensor:
"""simple docstring"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCamelCase__ : List[str] = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__magic_name__ ):
# mps does not support float64
UpperCamelCase__ : Union[str, Any] = self.timesteps.to(original_samples.device, dtype=torch.floataa )
UpperCamelCase__ : Tuple = timesteps.to(original_samples.device, dtype=torch.floataa )
else:
UpperCamelCase__ : str = self.timesteps.to(original_samples.device )
UpperCamelCase__ : int = timesteps.to(original_samples.device )
UpperCamelCase__ : Any = [self.index_for_timestep(__magic_name__, __magic_name__ ) for t in timesteps]
UpperCamelCase__ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCamelCase__ : int = sigma.unsqueeze(-1 )
UpperCamelCase__ : List[str] = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Any:
"""simple docstring"""
return self.config.num_train_timesteps
| 201
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: PreTrainedTokenizer , __UpperCAmelCase: int , __UpperCAmelCase: Optional[int] = None , ) -> List[Any]:
UpperCamelCase__ : Dict = {}
if train_file is not None:
UpperCamelCase__ : str = [train_file]
if eval_file is not None:
UpperCamelCase__ : Union[str, Any] = [eval_file]
if test_file is not None:
UpperCamelCase__ : Tuple = [test_file]
UpperCamelCase__ : Optional[Any] = datasets.load_dataset('''csv''' , data_files=__UpperCAmelCase )
UpperCamelCase__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
UpperCamelCase__ : str = features_name.pop(__UpperCAmelCase )
UpperCamelCase__ : List[str] = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCamelCase__ : Optional[Any] = {label: i for i, label in enumerate(__UpperCAmelCase )}
UpperCamelCase__ : Union[str, Any] = tokenizer.model_input_names
UpperCamelCase__ : str = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
UpperCamelCase__ : Optional[int] = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' ) , batched=__UpperCAmelCase , )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
UpperCamelCase__ : Dict = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='''max_length''' , ) , batched=__UpperCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCamelCase__ : Any = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCamelCase__ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCamelCase__ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
UpperCamelCase__ : int = labelaid[ex[label_name]]
yield (d, label)
UpperCamelCase__ : Tuple = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCamelCase__ : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCamelCase__ : int = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCamelCase__ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCamelCase__ : Optional[Any] = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCamelCase__ : Union[str, Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass
class lowercase__ :
'''simple docstring'''
a : int = field(metadata={"help": "Which column contains the label"} )
a : str = field(default=__lowerCamelCase , metadata={"help": "The path of the training file"} )
a : Optional[str] = field(default=__lowerCamelCase , metadata={"help": "The path of the development file"} )
a : Optional[str] = field(default=__lowerCamelCase , metadata={"help": "The path of the test file"} )
a : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a : bool = field(
default=__lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class lowercase__ :
'''simple docstring'''
a : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a : Optional[str] = field(
default=__lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] = field(
default=__lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : bool = field(default=__lowerCamelCase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a : Optional[str] = field(
default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def lowerCAmelCase_ ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCamelCase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCamelCase__ : str = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(__UpperCAmelCase: EvalPrediction ) -> Dict:
UpperCamelCase__ : Optional[Any] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCamelCase__ : Union[str, Any] = TFTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ : List[str] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCamelCase__ : Tuple = trainer.evaluate()
UpperCamelCase__ : Optional[int] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(__UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(__UpperCAmelCase )
return results
if __name__ == "__main__":
main()
| 201
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_lowercase : int = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_lowercase : Optional[int] = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_lowercase : Union[str, Any] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> Tuple:
return float((preds == labels).mean() )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any] ) -> Dict:
lowercase_ : List[str] = simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : List[Any] = float(fa_score(y_true=UpperCAmelCase__ , y_pred=UpperCAmelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : List[Any] = float(pearsonr(UpperCAmelCase__ , UpperCAmelCase__ )[0] )
lowercase_ : List[Any] = float(spearmanr(UpperCAmelCase__ , UpperCAmelCase__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : List[Any] ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase_ , lowercase_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase_ , lowercase_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase_ , lowercase_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 21
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Union[str, Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21
| 1
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase_ : Dict = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _A ( lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
for attribute in key.split('''.''' ):
a =getattr(lowercase , lowercase )
if weight_type is not None:
a =getattr(lowercase , lowercase ).shape
else:
a =hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
a =value
elif weight_type == "weight_g":
a =value
elif weight_type == "weight_v":
a =value
elif weight_type == "bias":
a =value
else:
a =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _A ( lowercase , lowercase ):
"""simple docstring"""
a =[]
a =fairseq_model.state_dict()
a =hf_model.feature_extractor
for name, value in fairseq_dict.items():
a =False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == '''group''' , )
a =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a =True
if "*" in mapped_key:
a =name.split(lowercase )[0].split('''.''' )[-2]
a =mapped_key.replace('''*''' , lowercase )
if "weight_g" in name:
a ='''weight_g'''
elif "weight_v" in name:
a ='''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
a ='''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a ='''weight'''
else:
a =None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
a =full_name.split('''conv_layers.''' )[-1]
a =name.split('''.''' )
a =int(items[0] )
a =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
a =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
a =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
a =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
a =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def _A ( lowercase , lowercase , lowercase=None ):
"""simple docstring"""
# load the pre-trained checkpoints
a =torch.load(lowercase )
a =WavLMConfigOrig(checkpoint['''cfg'''] )
a =WavLMOrig(lowercase )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
a =WavLMConfig.from_pretrained(lowercase )
else:
a =WavLMConfig()
a =WavLMModel(lowercase )
recursively_load_weights(lowercase , lowercase )
hf_wavlm.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase_ : Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 81
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : list , __magic_name__ : list , __magic_name__ : list ) -> float:
"""simple docstring"""
lowercase__ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__magic_name__ )] )
lowercase__ = np.array(__magic_name__ )
lowercase__ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __magic_name__ ) ) , x.transpose() ) , __magic_name__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : list ) -> float:
"""simple docstring"""
lowercase__ = (1, 2, 1)
lowercase__ = (1, 1, 0, 7)
lowercase__ = SARIMAX(
__magic_name__ , exog=__magic_name__ , order=__magic_name__ , seasonal_order=__magic_name__ )
lowercase__ = model.fit(disp=__magic_name__ , maxiter=600 , method="""nm""" )
lowercase__ = model_fit.predict(1 , len(__magic_name__ ) , exog=[test_match] )
return result[0]
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : list ) -> float:
"""simple docstring"""
lowercase__ = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__magic_name__ , __magic_name__ )
lowercase__ = regressor.predict(__magic_name__ )
return y_pred[0]
def UpperCamelCase ( __magic_name__ : list ) -> float:
"""simple docstring"""
train_user.sort()
lowercase__ = np.percentile(__magic_name__ , 25 )
lowercase__ = np.percentile(__magic_name__ , 75 )
lowercase__ = qa - qa
lowercase__ = qa - (iqr * 0.1)
return low_lim
def UpperCamelCase ( __magic_name__ : list , __magic_name__ : float ) -> bool:
"""simple docstring"""
lowercase__ = 0
lowercase__ = 0
for i in list_vote:
if i > actual_result:
lowercase__ = not_safe + 1
else:
if abs(abs(__magic_name__ ) - abs(__magic_name__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
A : Dict = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
A : str = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
A : Any = Normalizer().fit_transform(data_input_df.values)
# split data
A : Optional[int] = normalize_df[:, 2].tolist()
A : Any = normalize_df[:, 0].tolist()
A : str = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
A : int = normalize_df[:, [1, 2]].tolist()
A : Any = x[: len(x) - 1]
A : Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
A : Optional[int] = total_date[: len(total_date) - 1]
A : Optional[int] = total_user[: len(total_user) - 1]
A : str = total_match[: len(total_match) - 1]
A : Union[str, Any] = total_date[len(total_date) - 1 :]
A : List[str] = total_user[len(total_user) - 1 :]
A : str = total_match[len(total_match) - 1 :]
# voting system with forecasting
A : int = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
A : int = '' if data_safety_checker(res_vote, tst_user) else 'not '
print('Today\'s data is {not_str}safe.')
| 305
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
set_seed(770)
lowerCamelCase = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
lowerCamelCase = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
lowerCamelCase = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase = os.path.join(os.path.expanduser("""~"""), """.cache""")
lowerCamelCase = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _A ( _lowerCAmelCase , _lowerCAmelCase=False ):
"""simple docstring"""
__lowercase =model_type
if use_small:
key += "_small"
return os.path.join(_lowerCAmelCase , REMOTE_MODEL_PATHS[key]['file_name'] )
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
hf_hub_download(repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , local_dir=_lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase="text" ):
"""simple docstring"""
if model_type == "text":
__lowercase =BarkSemanticModel
__lowercase =BarkSemanticConfig
__lowercase =BarkSemanticGenerationConfig
elif model_type == "coarse":
__lowercase =BarkCoarseModel
__lowercase =BarkCoarseConfig
__lowercase =BarkCoarseGenerationConfig
elif model_type == "fine":
__lowercase =BarkFineModel
__lowercase =BarkFineConfig
__lowercase =BarkFineGenerationConfig
else:
raise NotImplementedError()
__lowercase =f"""{model_type}_small""" if use_small else model_type
__lowercase =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_lowerCAmelCase ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
__lowercase =torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
# this is a hack
__lowercase =checkpoint['model_args']
if "input_vocab_size" not in model_args:
__lowercase =model_args['vocab_size']
__lowercase =model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__lowercase =model_args.pop('n_head' )
__lowercase =model_args.pop('n_embd' )
__lowercase =model_args.pop('n_layer' )
__lowercase =ConfigClass(**checkpoint['model_args'] )
__lowercase =ModelClass(config=_lowerCAmelCase )
__lowercase =GenerationConfigClass()
__lowercase =model_generation_config
__lowercase =checkpoint['model']
# fixup checkpoint
__lowercase ='_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(_lowerCAmelCase ):
# replace part of the key with corresponding layer name in HF implementation
__lowercase =k[len(_lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
__lowercase =new_k.replace(_lowerCAmelCase , new_layer_name_dict[old_layer_name] )
__lowercase =state_dict.pop(_lowerCAmelCase )
__lowercase =set(state_dict.keys() ) - set(model.state_dict().keys() )
__lowercase ={k for k in extra_keys if not k.endswith('.attn.bias' )}
__lowercase =set(model.state_dict().keys() ) - set(state_dict.keys() )
__lowercase ={k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(_lowerCAmelCase ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(_lowerCAmelCase ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
__lowercase =model.num_parameters(exclude_embeddings=_lowerCAmelCase )
__lowercase =checkpoint['best_val_loss'].item()
logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(_lowerCAmelCase , 3 )} loss""" )
model.eval()
model.to(_lowerCAmelCase )
del checkpoint, state_dict
return model
def _A ( _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase="text" ):
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__lowercase ='cpu' # do conversion on cpu
__lowercase =_get_ckpt_path(_lowerCAmelCase , use_small=_lowerCAmelCase )
__lowercase =_load_model(_lowerCAmelCase , _lowerCAmelCase , model_type=_lowerCAmelCase , use_small=_lowerCAmelCase )
# load bark initial model
__lowercase =_bark_load_model(_lowerCAmelCase , 'cpu' , model_type=_lowerCAmelCase , use_small=_lowerCAmelCase )
if model_type == "text":
__lowercase =bark_model['model']
if model.num_parameters(exclude_embeddings=_lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
__lowercase =5
__lowercase =10
if model_type in ["text", "coarse"]:
__lowercase =torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
__lowercase =bark_model(_lowerCAmelCase )[0]
__lowercase =model(_lowerCAmelCase )
# take last logits
__lowercase =output_new_model_total.logits[:, [-1], :]
else:
__lowercase =3
__lowercase =8
__lowercase =torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__lowercase =model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =bark_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('initial and new outputs are not equal' )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
"""simple docstring"""
__lowercase =os.path.join(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =BarkSemanticConfig.from_pretrained(os.path.join(_lowerCAmelCase , 'config.json' ) )
__lowercase =BarkCoarseConfig.from_pretrained(os.path.join(_lowerCAmelCase , 'config.json' ) )
__lowercase =BarkFineConfig.from_pretrained(os.path.join(_lowerCAmelCase , 'config.json' ) )
__lowercase =EncodecConfig.from_pretrained('facebook/encodec_24khz' )
__lowercase =BarkSemanticModel.from_pretrained(_lowerCAmelCase )
__lowercase =BarkCoarseModel.from_pretrained(_lowerCAmelCase )
__lowercase =BarkFineModel.from_pretrained(_lowerCAmelCase )
__lowercase =EncodecModel.from_pretrained('facebook/encodec_24khz' )
__lowercase =BarkConfig.from_sub_model_configs(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__lowercase =BarkModel(_lowerCAmelCase )
__lowercase =semantic
__lowercase =coarseAcoustic
__lowercase =fineAcoustic
__lowercase =codec
__lowercase =bark_generation_config
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
bark.save_pretrained(_lowerCAmelCase , repo_id=_lowerCAmelCase , push_to_hub=_lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
lowerCamelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 48
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """CLIPImageProcessor"""
lowerCAmelCase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[Any] , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : str):
'''simple docstring'''
__lowercase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCAmelCase , )
__lowercase =kwargs.pop('feature_extractor')
__lowercase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_lowerCAmelCase , _lowerCAmelCase)
def __call__( self : List[Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=None , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.')
if text is not None:
__lowercase =self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)
if images is not None:
__lowercase =self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)
if text is not None and images is not None:
__lowercase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase) , tensor_type=_lowerCAmelCase)
def __lowerCamelCase ( self : Tuple , *_lowerCAmelCase : str , **_lowerCAmelCase : int):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[str] , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =self.tokenizer.model_input_names
__lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCAmelCase , )
return self.image_processor_class
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCAmelCase , )
return self.image_processor
| 48
| 1
|
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 2_000_000 ) -> int:
'''simple docstring'''
lowerCAmelCase : str = [0 for i in range(n + 1 )]
lowerCAmelCase : Optional[Any] = 1
lowerCAmelCase : Tuple = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i, n + 1, _UpperCAmelCase ):
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Optional[Any] = 0
for i in range(_UpperCAmelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
| 138
|
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__A : Tuple = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "dhaka", _UpperCAmelCase = 5 ) -> int:
'''simple docstring'''
lowerCAmelCase : List[Any] = min(_UpperCAmelCase, 50 ) # Prevent abuse!
lowerCAmelCase : str = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
lowerCAmelCase : Optional[Any] = requests.get('https://www.google.com/search', params=_UpperCAmelCase, headers=_UpperCAmelCase )
lowerCAmelCase : int = BeautifulSoup(html.text, 'html.parser' )
lowerCAmelCase : List[Any] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script' ) ) ) )
lowerCAmelCase : Optional[int] = json.dumps(_UpperCAmelCase )
lowerCAmelCase : str = json.loads(_UpperCAmelCase )
lowerCAmelCase : str = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",', _UpperCAmelCase, )
if not matched_google_image_data:
return 0
lowerCAmelCase : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]', '', str(_UpperCAmelCase ), )
lowerCAmelCase : Dict = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]', _UpperCAmelCase, )
for index, fixed_full_res_image in enumerate(_UpperCAmelCase ):
if index >= max_images:
return index
lowerCAmelCase : Any = bytes(_UpperCAmelCase, 'ascii' ).decode(
'unicode-escape' )
lowerCAmelCase : Tuple = bytes(_UpperCAmelCase, 'ascii' ).decode(
'unicode-escape' )
lowerCAmelCase : Optional[Any] = urllib.request.build_opener()
lowerCAmelCase : Any = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_UpperCAmelCase )
lowerCAmelCase : List[str] = f"query_{query.replace(' ', '_' )}"
if not os.path.exists(_UpperCAmelCase ):
os.makedirs(_UpperCAmelCase )
urllib.request.urlretrieve( # noqa: S310
_UpperCAmelCase, f"{path_name}/original_size_img_{index}.jpg" )
return index
if __name__ == "__main__":
try:
__A : Tuple = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
| 138
| 1
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : Dict )-> str:
super().__init__(*__snake_case , **__snake_case )
snake_case = {}
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Optional[int] , *__snake_case : int , **__snake_case : List[str] )-> Tuple:
snake_case = super().add_tokens(__snake_case , *__snake_case , **__snake_case )
if num_added_tokens == 0:
raise ValueError(
f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def lowerCAmelCase ( self : Optional[int] , __snake_case : Optional[Any] , *__snake_case : Tuple , __snake_case : Optional[int]=1 , **__snake_case : Dict )-> Tuple:
snake_case = []
if num_vec_per_token == 1:
self.try_adding_tokens(__snake_case , *__snake_case , **__snake_case )
output.append(__snake_case )
else:
snake_case = []
for i in range(__snake_case ):
snake_case = placeholder_token + f'''_{i}'''
self.try_adding_tokens(__snake_case , *__snake_case , **__snake_case )
output.append(__snake_case )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'''The tokenizer already has placeholder token {token} that can get confused with'''
f''' {placeholder_token}keep placeholder tokens independent''' )
snake_case = output
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : Union[str, Any]=False , __snake_case : Optional[Any]=1.0 )-> List[str]:
if isinstance(__snake_case , __snake_case ):
snake_case = []
for i in range(len(__snake_case ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__snake_case ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
snake_case = self.token_map[placeholder_token]
snake_case = tokens[: 1 + int(len(__snake_case ) * prop_tokens_to_load )]
if vector_shuffle:
snake_case = copy.copy(__snake_case )
random.shuffle(__snake_case )
snake_case = text.replace(__snake_case , """ """.join(__snake_case ) )
return text
def __call__( self : int , __snake_case : Union[str, Any] , *__snake_case : Tuple , __snake_case : Union[str, Any]=False , __snake_case : int=1.0 , **__snake_case : str )-> Union[str, Any]:
return super().__call__(
self.replace_placeholder_tokens_in_text(
__snake_case , vector_shuffle=__snake_case , prop_tokens_to_load=__snake_case ) , *__snake_case , **__snake_case , )
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] , *__snake_case : Union[str, Any] , __snake_case : List[Any]=False , __snake_case : Optional[int]=1.0 , **__snake_case : Union[str, Any] )-> str:
return super().encode(
self.replace_placeholder_tokens_in_text(
__snake_case , vector_shuffle=__snake_case , prop_tokens_to_load=__snake_case ) , *__snake_case , **__snake_case , )
| 361
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Optional[int] = ["""image_processor""", """feature_extractor"""]
a__ : int = """TvltImageProcessor"""
a__ : Tuple = """TvltFeatureExtractor"""
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
snake_case : int = image_processor
snake_case : int = feature_extractor
def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
snake_case : str = None
if images is not None:
snake_case : Tuple = self.image_processor(SCREAMING_SNAKE_CASE , mask_pixel=SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if images_mixed is not None:
snake_case : Dict = self.image_processor(SCREAMING_SNAKE_CASE , is_mixed=SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if audio is not None:
snake_case : Dict = self.feature_extractor(
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , mask_audio=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
snake_case : List[Any] = {}
if audio is not None:
output_dict.update(SCREAMING_SNAKE_CASE )
if images is not None:
output_dict.update(SCREAMING_SNAKE_CASE )
if images_mixed_dict is not None:
output_dict.update(SCREAMING_SNAKE_CASE )
return output_dict
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = self.image_processor.model_input_names
snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 148
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowerCAmelCase : List[str] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class _UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Dict , lowerCamelCase :str , lowerCamelCase :bool , lowerCamelCase :str = None , lowerCamelCase :list = None ) -> Tuple:
UpperCAmelCase__ = None
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
UpperCAmelCase__ = os.path.abspath("examples" )
for item in os.listdir(lowerCamelCase ):
if item not in EXCLUDE_EXAMPLES:
UpperCAmelCase__ = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ) and ".py" in item_path:
with self.subTest(
tested_script=lowerCamelCase , feature_script=lowerCamelCase , tested_section="main()" if parser_only else "training_function()" , ):
UpperCAmelCase__ = compare_against_test(
os.path.join(lowerCamelCase , lowerCamelCase ) , lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = "\n".join(lowerCamelCase )
if special_strings is not None:
for string in special_strings:
UpperCAmelCase__ = diff.replace(lowerCamelCase , "" )
self.assertEqual(lowerCamelCase , "" )
def UpperCAmelCase_ ( self :List[str] ) -> Any:
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
self.one_complete_example("complete_nlp_example.py" , lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
UpperCAmelCase__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.one_complete_example("complete_cv_example.py" , lowerCamelCase , lowerCamelCase , lowerCamelCase )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = False
@classmethod
def UpperCAmelCase_ ( cls :List[Any] ) -> Any:
super().setUpClass()
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase__ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def UpperCAmelCase_ ( cls :Union[str, Any] ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase_ ( self :Dict ) -> Dict:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
self.assertNotIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> int:
UpperCAmelCase__ = f'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
if torch.cuda.is_available():
UpperCAmelCase__ = torch.cuda.device_count()
else:
UpperCAmelCase__ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
else:
self.assertIn("epoch 0:" , lowerCamelCase )
self.assertIn("epoch 1:" , lowerCamelCase )
@slow
def UpperCAmelCase_ ( self :Dict ) -> Optional[int]:
UpperCAmelCase__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
UpperCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=lowerCamelCase )
UpperCAmelCase__ = re.findall("({.+})" , lowerCamelCase )
UpperCAmelCase__ = [r for r in results if "accuracy" in r][-1]
UpperCAmelCase__ = ast.literal_eval(lowerCamelCase )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def UpperCAmelCase_ ( self :int ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase_ ( self :List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
UpperCAmelCase__ = f'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase , "tracking" ) ) )
def UpperCAmelCase_ ( self :Any ) -> Dict:
UpperCAmelCase__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def UpperCAmelCase_ ( self :Any ) -> Optional[int]:
UpperCAmelCase__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 169
| 0
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _a( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(UpperCamelCase__, '''_dynamo''' ):
return False
return isinstance(UpperCamelCase__, torch._dynamo.eval_frame.OptimizedModule )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =(torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE__ : Any =is_compiled_module(UpperCamelCase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Dict =model
SCREAMING_SNAKE_CASE__ : Dict =model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE__ : Optional[Any] =getattr(UpperCamelCase__, '''forward''' )
SCREAMING_SNAKE_CASE__ : List[Any] =model.__dict__.pop('''_original_forward''', UpperCamelCase__ )
if original_forward is not None:
while hasattr(UpperCamelCase__, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE__ : Optional[int] =forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE__ : Optional[Any] =forward
if getattr(UpperCamelCase__, '''_converted_to_transformer_engine''', UpperCamelCase__ ):
convert_model(UpperCamelCase__, to_transformer_engine=UpperCamelCase__ )
if is_compiled:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model
SCREAMING_SNAKE_CASE__ : Tuple =compiled_model
return model
def _a( ):
'''simple docstring'''
PartialState().wait_for_everyone()
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCamelCase__, UpperCamelCase__ )
elif PartialState().local_process_index == 0:
torch.save(UpperCamelCase__, UpperCamelCase__ )
@contextmanager
def _a( **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE__ : List[Any] =str(UpperCamelCase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _a( UpperCamelCase__ : Any ):
'''simple docstring'''
if not hasattr(UpperCamelCase__, '''__qualname__''' ) and not hasattr(UpperCamelCase__, '''__name__''' ):
SCREAMING_SNAKE_CASE__ : Optional[int] =getattr(UpperCamelCase__, '''__class__''', UpperCamelCase__ )
if hasattr(UpperCamelCase__, '''__qualname__''' ):
return obj.__qualname__
if hasattr(UpperCamelCase__, '''__name__''' ):
return obj.__name__
return str(UpperCamelCase__ )
def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : int ):
'''simple docstring'''
for key, value in source.items():
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =destination.setdefault(UpperCamelCase__, {} )
merge_dicts(UpperCamelCase__, UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : List[str] =value
return destination
def _a( UpperCamelCase__ : int = None ):
'''simple docstring'''
if port is None:
SCREAMING_SNAKE_CASE__ : Any =2_9_5_0_0
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 222
|
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
if not isinstance(UpperCamelCase__, UpperCamelCase__ ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =precision
SCREAMING_SNAKE_CASE__ : int =ceil(precision / 1_4 )
SCREAMING_SNAKE_CASE__ : int =4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
SCREAMING_SNAKE_CASE__ : Tuple =1
SCREAMING_SNAKE_CASE__ : Any =1_3_5_9_1_4_0_9
SCREAMING_SNAKE_CASE__ : List[Any] =Decimal(UpperCamelCase__ )
for k in range(1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : str =factorial(6 * k ) // (factorial(3 * k ) * factorial(UpperCamelCase__ ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
a_ = 5_0
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 222
| 1
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
class a__ :
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] =4_2
UpperCAmelCase__ : Tuple =None
@staticmethod
def _lowercase ( ) ->int:
"""simple docstring"""
raise NotImplementedError
def _lowercase ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , **UpperCAmelCase__ : List[Any] ) ->List[str]:
"""simple docstring"""
raise NotImplementedError
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) ->List[str]:
"""simple docstring"""
raise NotImplementedError
def _lowercase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def _lowercase ( cls : str ) ->int:
"""simple docstring"""
return f"`pip install {cls.pip_package or cls.name}`"
class a__ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict ="""optuna"""
@staticmethod
def _lowercase ( ) ->List[str]:
"""simple docstring"""
return is_optuna_available()
def _lowercase ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : str ) ->Any:
"""simple docstring"""
return run_hp_search_optuna(A_ , A_ , A_ , **A_ )
def _lowercase ( self : Any , UpperCAmelCase__ : Tuple ) ->Optional[int]:
"""simple docstring"""
return default_hp_space_optuna(A_ )
class a__ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple ="""ray"""
UpperCAmelCase__ : Tuple ="""'ray[tune]'"""
@staticmethod
def _lowercase ( ) ->List[Any]:
"""simple docstring"""
return is_ray_available()
def _lowercase ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Union[str, Any] ) ->Dict:
"""simple docstring"""
return run_hp_search_ray(A_ , A_ , A_ , **A_ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Optional[Any] ) ->Dict:
"""simple docstring"""
return default_hp_space_ray(A_ )
class a__ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict ="""sigopt"""
@staticmethod
def _lowercase ( ) ->Dict:
"""simple docstring"""
return is_sigopt_available()
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Any ) ->List[Any]:
"""simple docstring"""
return run_hp_search_sigopt(A_ , A_ , A_ , **A_ )
def _lowercase ( self : int , UpperCAmelCase__ : str ) ->Dict:
"""simple docstring"""
return default_hp_space_sigopt(A_ )
class a__ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple ="""wandb"""
@staticmethod
def _lowercase ( ) ->Any:
"""simple docstring"""
return is_wandb_available()
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return run_hp_search_wandb(A_ , A_ , A_ , **A_ )
def _lowercase ( self : int , UpperCAmelCase__ : Any ) ->List[str]:
"""simple docstring"""
return default_hp_space_wandb(A_ )
UpperCAmelCase__ : Dict = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __lowercase ( ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowercase__ ) > 0:
SCREAMING_SNAKE_CASE : Tuple = available_backends[0].name
if len(lowercase__ ) > 1:
logger.info(
F"{len(lowercase__ )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 245
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_UpperCamelCase = random.Random()
def _lowercase ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
if rng is None:
__lowerCAmelCase : Optional[Any] = global_rng
__lowerCAmelCase : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowercase (unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ) ->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = parent
__lowerCAmelCase : Optional[int] = batch_size
__lowerCAmelCase : Any = min_seq_length
__lowerCAmelCase : Tuple = max_seq_length
__lowerCAmelCase : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase : Dict = feature_size
__lowerCAmelCase : Optional[int] = padding_value
__lowerCAmelCase : Tuple = sampling_rate
__lowerCAmelCase : Union[str, Any] = return_attention_mask
__lowerCAmelCase : Dict = do_normalize
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self , A_=False , A_=False ) ->Union[str, Any]:
'''simple docstring'''
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__lowerCAmelCase : Tuple = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase : Tuple = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
class __lowercase (_UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase = WavaVecaFeatureExtractor
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = WavaVecaFeatureExtractionTester(self )
def UpperCamelCase__ ( self , A_ ) ->Optional[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : Any = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test not batched input
__lowerCAmelCase : Optional[Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__lowerCAmelCase : Dict = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
__lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values
__lowerCAmelCase : Dict = feat_extract(A_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCAmelCase : List[Any] = np.asarray(A_ )
__lowerCAmelCase : Any = feat_extract(A_ , return_tensors='''np''' ).input_values
__lowerCAmelCase : Union[str, Any] = feat_extract(A_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : str = ['''longest''', '''max_length''', '''do_not_pad''']
__lowerCAmelCase : str = [None, 1600, None]
for max_length, padding in zip(A_ , A_ ):
__lowerCAmelCase : Optional[int] = feat_extract(A_ , padding=A_ , max_length=A_ , return_tensors='''np''' )
__lowerCAmelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Optional[int] = range(800 , 1400 , 200 )
__lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
__lowerCAmelCase : int = ['''longest''', '''max_length''', '''do_not_pad''']
__lowerCAmelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(A_ , A_ ):
__lowerCAmelCase : Union[str, Any] = feat_extract(A_ , max_length=A_ , padding=A_ )
__lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase__ ( self ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : List[str] = feat_extract(
A_ , truncation=A_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
__lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : int = feat_extract(
A_ , truncation=A_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
__lowerCAmelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__lowerCAmelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCAmelCase : Optional[int] = feat_extract(
A_ , truncation=A_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
__lowerCAmelCase : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
import torch
__lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase : Any = np.random.rand(100 ).astype(np.floataa )
__lowerCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase : Any = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__lowerCAmelCase : List[str] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
__lowerCAmelCase : Any = WavaVecaConfig.from_pretrained(A_ )
__lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(A_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 275
| 0
|
'''simple docstring'''
_a : List[Any] = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase ) -> int:
# Return True if there is node that has not iterated.
__lowerCAmelCase = [False] * len(lowercase )
__lowerCAmelCase = [s]
__lowerCAmelCase = True
while queue:
__lowerCAmelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowercase )
__lowerCAmelCase = True
__lowerCAmelCase = u
return visited[t]
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> str:
__lowerCAmelCase = [-1] * (len(lowercase ))
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowercase , lowercase , lowercase , lowercase ):
__lowerCAmelCase = float("""Inf""" )
__lowerCAmelCase = sink
while s != source:
# Find the minimum value in select path
__lowerCAmelCase = min(lowercase , graph[parent[s]][s] )
__lowerCAmelCase = parent[s]
max_flow += path_flow
__lowerCAmelCase = sink
while v != source:
__lowerCAmelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__lowerCAmelCase = parent[v]
for i in range(len(lowercase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 46
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def _lowerCAmelCase ( lowercase ) -> List[str]:
__lowerCAmelCase = np.max(lowercase , axis=-1 , keepdims=lowercase )
__lowerCAmelCase = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase )
class _UpperCAmelCase ( lowerCAmelCase_ ):
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {}
if "second_text" in kwargs:
__lowerCAmelCase = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
return self.tokenizer(__SCREAMING_SNAKE_CASE,text_pair=__SCREAMING_SNAKE_CASE,return_tensors=self.framework )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.model(**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = model_outputs.logits[0].numpy()
__lowerCAmelCase = softmax(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = np.argmax(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.model.config.idalabel[best_class]
__lowerCAmelCase = probabilities[best_class].item()
__lowerCAmelCase = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 46
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase__ = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
UpperCamelCase = []
for num in range(len(snake_case__ ) ):
UpperCamelCase = 0
while 2 * i * i <= odd_composites[num]:
UpperCamelCase = odd_composites[num] - 2 * i * i
if is_prime(snake_case__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(snake_case__ ) == n:
return list_nums
return []
def a__ ( ):
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 153
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
| 0
|
import numpy as np
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@slow
def a ( self ):
snake_case_ = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
snake_case_ = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(snake_case )
from datasets import load_dataset
snake_case_ = load_dataset('nielsr/rvlcdip-demo' )
snake_case_ = dataset['train'][0]['image'].convert('RGB' )
snake_case_ = image_processor(snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case )
snake_case_ = outputs.logits
snake_case_ = torch.Size((1, 16) )
self.assertEqual(logits.shape , snake_case )
snake_case_ = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=snake_case , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case , atol=1e-4 ) )
| 200
| 0
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_: List[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Union[str, Any] ={'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_: List[str] ={
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'AI-Sweden/gpt-sw3-126m': 20_48,
'AI-Sweden/gpt-sw3-350m': 20_48,
'AI-Sweden/gpt-sw3-1.6b': 20_48,
'AI-Sweden/gpt-sw3-6.7b': 20_48,
'AI-Sweden/gpt-sw3-20b': 20_48,
}
class __A ( UpperCamelCase__ ):
a__ : List[str] = VOCAB_FILES_NAMES
a__ : Any = PRETRAINED_VOCAB_FILES_MAP
a__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__(self : Tuple , __a : Dict , __a : Union[str, Any]=False , __a : int=False , __a : Dict=False , __a : Any=None , __a : Tuple=None , __a : List[str]=None , __a : Optional[Any]=None , __a : Optional[Dict[str, Any]] = None , **__a : Optional[int] , ):
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
UpperCAmelCase_ = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
UpperCAmelCase_ = "<|endoftext|>" if eos_token is None else eos_token
UpperCAmelCase_ = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
UpperCAmelCase_ = unk_token if pad_token is None else pad_token
UpperCAmelCase_ = eos_token if bos_token is None else bos_token
else:
UpperCAmelCase_ = "<pad>" if pad_token is None else pad_token
UpperCAmelCase_ = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
# Used for whitespace normalization in input texts
# fmt : off
UpperCAmelCase_ = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
UpperCAmelCase_ = re.compile(
f"""[{"".join(map(__a , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
def __getstate__(self : Optional[int] ):
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__(self : Optional[Any] , __a : int ):
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def _lowercase (self : int ):
return len(self.sp_model )
def _lowercase (self : List[str] , __a : str ):
UpperCAmelCase_ = self.non_printing_characters_re.sub("" , __a )
# Normalize whitespaces
UpperCAmelCase_ = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
UpperCAmelCase_ = unicodedata.normalize("NFC" , __a )
return text
def _lowercase (self : int , __a : str , **__a : str ):
UpperCAmelCase_ = self.preprocess_text(__a )
return self.sp_model.encode(__a , out_type=__a )
def _lowercase (self : Union[str, Any] , __a : str ):
return self.sp_model.PieceToId(__a )
def _lowercase (self : List[Any] , __a : int ):
return self.sp_model.IdToPiece(__a )
@staticmethod
def _lowercase (__a : str ):
return out_string
def _lowercase (self : Dict , __a : List[str] ):
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
UpperCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
UpperCAmelCase_ = True
UpperCAmelCase_ = []
else:
current_sub_tokens.append(__a )
UpperCAmelCase_ = False
out_string += self.sp_model.decode(__a )
return out_string
def _lowercase (self : List[str] ):
UpperCAmelCase_ = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase (self : Optional[Any] , __a : str , __a : Optional[str] = None ):
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def _lowercase (self : str , __a : Union[str, List[str]] , __a : Union[str, bool] = False ):
if isinstance(__a , __a ):
UpperCAmelCase_ = self.preprocess_text(__a )
UpperCAmelCase_ = self.sp_model.encode(__a )
else:
UpperCAmelCase_ = [self.preprocess_text(__a ) for t in text]
UpperCAmelCase_ = self.sp_model.encode(__a )
if return_tensors is True or return_tensors == "pt":
UpperCAmelCase_ = torch.tensor(__a )
return token_ids
def _lowercase (self : List[Any] , __a : Union[int, List[int]] ):
return self.sp_model.decode(__a )
def _lowercase (self : int , __a : "Conversation" ):
UpperCAmelCase_ = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
UpperCAmelCase_ = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__a ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=__a )
| 1
|
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(snake_case_ , x % y )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(snake_case_ , snake_case_ )
return g
if __name__ == "__main__":
print(f"{solution() = }")
| 1
| 1
|
'''simple docstring'''
def __lowercase ( __lowercase , __lowercase ) -> bool:
'''simple docstring'''
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 174
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
__lowercase = torch.device('''cpu''')
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCamelCase :Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = dct.pop(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = val
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = []
for k in state_dict.keys():
__UpperCamelCase :Union[str, Any] = k
if ".pwconv" in k:
__UpperCamelCase :Any = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
__UpperCamelCase :List[Any] = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
__UpperCamelCase :Optional[int] = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
__UpperCamelCase :int = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
__UpperCamelCase :int = k_new.split('''.''' )
if ls[2].isdigit():
__UpperCamelCase :List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
__UpperCamelCase :Optional[int] = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__UpperCamelCase :Tuple = 1_000
__UpperCamelCase :Any = "huggingface/label-files"
__UpperCamelCase :int = "imagenet-1k-id2label.json"
__UpperCamelCase :Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
__UpperCamelCase :str = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__UpperCamelCase :int = idalabel
__UpperCamelCase :Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__UpperCamelCase :Optional[Any] = [3, 3, 6, 4]
__UpperCamelCase :Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__UpperCamelCase :List[str] = [3, 3, 9, 6]
__UpperCamelCase :Optional[Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__UpperCamelCase :Optional[int] = [4, 3, 10, 5]
__UpperCamelCase :Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__UpperCamelCase :str = [4, 4, 12, 6]
__UpperCamelCase :Optional[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
__UpperCamelCase :Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='''cpu''' , check_hash=SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :Tuple = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
__UpperCamelCase :Optional[int] = checkpoint
__UpperCamelCase :Any = create_rename_keys(SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load HuggingFace model
__UpperCamelCase :Tuple = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(SCREAMING_SNAKE_CASE )
# prepare test inputs
__UpperCamelCase :Optional[Any] = prepare_img()
__UpperCamelCase :str = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
__UpperCamelCase :Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
# compare outputs from both models
__UpperCamelCase :str = get_expected_output(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
__lowercase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 43
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = torch.device('''cpu''')
def lowerCAmelCase__( ) -> Any:
__snake_case : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : Optional[int] = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def lowerCAmelCase__( lowercase : Dict ) -> List[Any]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def lowerCAmelCase__( lowercase : Tuple , lowercase : Union[str, Any] , lowercase : Union[str, Any] ) -> List[Any]:
__snake_case : List[Any] = dct.pop(lowercase )
__snake_case : List[Any] = val
def lowerCAmelCase__( lowercase : Union[str, Any] ) -> Tuple:
__snake_case : Optional[Any] = []
for k in state_dict.keys():
__snake_case : Union[str, Any] = k
if ".pwconv" in k:
__snake_case : Any = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
__snake_case : List[Any] = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
__snake_case : Optional[int] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
__snake_case : int = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
__snake_case : int = k_new.split("." )
if ls[2].isdigit():
__snake_case : List[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
__snake_case : Optional[int] = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : List[str] ) -> Union[str, Any]:
__snake_case : List[str] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__snake_case : Tuple = 1000
__snake_case : Any = "huggingface/label-files"
__snake_case : int = "imagenet-1k-id2label.json"
__snake_case : Dict = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
__snake_case : str = {int(lowercase ): v for k, v in idalabel.items()}
__snake_case : int = idalabel
__snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__snake_case : Optional[Any] = [3, 3, 6, 4]
__snake_case : Optional[int] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__snake_case : List[str] = [3, 3, 9, 6]
__snake_case : Optional[Any] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__snake_case : Optional[int] = [4, 3, 10, 5]
__snake_case : Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__snake_case : str = [4, 4, 12, 6]
__snake_case : Optional[Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
__snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" , check_hash=lowercase )
else:
__snake_case : Tuple = torch.load(lowercase , map_location="cpu" )
__snake_case : Optional[int] = checkpoint
__snake_case : Any = create_rename_keys(lowercase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
# load HuggingFace model
__snake_case : Tuple = SwiftFormerForImageClassification(lowercase ).eval()
hf_model.load_state_dict(lowercase )
# prepare test inputs
__snake_case : Optional[Any] = prepare_img()
__snake_case : str = ViTImageProcessor.from_pretrained("preprocessor_config" )
__snake_case : Optional[int] = processor(images=lowercase , return_tensors="pt" )
# compare outputs from both models
__snake_case : str = get_expected_output(lowercase )
__snake_case : Optional[int] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase , atol=1E-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
_UpperCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 326
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( SCREAMING_SNAKE_CASE__) -> list[list[float]]:
__snake_case: Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE__) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
__snake_case: Tuple = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creates a copy of the matrix with swapped positions of the elements
__snake_case: Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case: Optional[Any] = matrix[1][1], matrix[0][0]
__snake_case: Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE__)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(SCREAMING_SNAKE_CASE__) == 3
and len(matrix[0]) == 3
and len(matrix[1]) == 3
and len(matrix[2]) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__snake_case: Any = float(
(
(d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
)
- (
(d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creating cofactor matrix
__snake_case: Tuple = [
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
]
__snake_case: Dict = (d(matrix[1][1]) * d(matrix[2][2])) - (
d(matrix[1][2]) * d(matrix[2][1])
)
__snake_case: Tuple = -(
(d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
)
__snake_case: Optional[int] = (d(matrix[1][0]) * d(matrix[2][1])) - (
d(matrix[1][1]) * d(matrix[2][0])
)
__snake_case: Union[str, Any] = -(
(d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
)
__snake_case: str = (d(matrix[0][0]) * d(matrix[2][2])) - (
d(matrix[0][2]) * d(matrix[2][0])
)
__snake_case: List[Any] = -(
(d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
)
__snake_case: Optional[Any] = (d(matrix[0][1]) * d(matrix[1][2])) - (
d(matrix[0][2]) * d(matrix[1][1])
)
__snake_case: List[str] = -(
(d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
)
__snake_case: Optional[int] = (d(matrix[0][0]) * d(matrix[1][1])) - (
d(matrix[0][1]) * d(matrix[1][0])
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
__snake_case: Tuple = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
inverse_matrix[i][j] /= d(SCREAMING_SNAKE_CASE__)
# Calculate the inverse of the matrix
return [[float(d(SCREAMING_SNAKE_CASE__)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A__ ( SCREAMING_SNAKE_CASE__ = 3) -> qiskit.result.counts.Counts:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
raise TypeError("""number of qubits must be a integer.""")
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""")
if math.floor(SCREAMING_SNAKE_CASE__) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""")
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""")
__snake_case: int = QuantumRegister(SCREAMING_SNAKE_CASE__ , """qr""")
__snake_case: List[str] = ClassicalRegister(SCREAMING_SNAKE_CASE__ , """cr""")
__snake_case: Optional[Any] = QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.h(number_of_qubits - i - 1)
counter -= 1
for j in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
for k in range(number_of_qubits // 2):
quantum_circuit.swap(SCREAMING_SNAKE_CASE__ , number_of_qubits - k - 1)
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# simulate with 10000 shots
__snake_case: Union[str, Any] = Aer.get_backend("""qasm_simulator""")
__snake_case: Optional[Any] = execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=1_0000)
return job.result().get_counts(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 293
| 0
|
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
_enforce_args(lowerCamelCase_ , lowerCamelCase_ )
if n == 0:
return 0
_lowercase : Union[str, Any] = float('-inf' )
for i in range(1 , n + 1 ):
_lowercase : int = max(
lowerCamelCase_ , prices[i - 1] + naive_cut_rod_recursive(n - i , lowerCamelCase_ ) )
return max_revue
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_enforce_args(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Optional[Any] = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowercase : Any = float('-inf' )
for i in range(1 , n + 1 ):
_lowercase : List[Any] = max(
lowerCamelCase_ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowerCamelCase_ , lowerCamelCase_ ) , )
_lowercase : Dict = max_revenue
return max_rev[n]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_enforce_args(lowerCamelCase_ , lowerCamelCase_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_lowercase : int = [float('-inf' ) for _ in range(n + 1 )]
_lowercase : str = 0
for i in range(1 , n + 1 ):
_lowercase : Tuple = max_rev[i]
for j in range(1 , i + 1 ):
_lowercase : Any = max(lowerCamelCase_ , prices[j - 1] + max_rev[i - j] )
_lowercase : Optional[Any] = max_revenue_i
return max_rev[n]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
if n < 0:
_lowercase : Optional[int] = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(lowerCamelCase_ )
if n > len(lowerCamelCase_ ):
_lowercase : Tuple = (
'Each integral piece of rod must have a corresponding price. '
F'''Got n = {n} but length of prices = {len(lowerCamelCase_ )}'''
)
raise ValueError(lowerCamelCase_ )
def UpperCamelCase_( ) -> Optional[int]:
_lowercase : List[str] = [6, 10, 12, 15, 20, 23]
_lowercase : Any = len(lowerCamelCase_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowercase : Tuple = 36
_lowercase : int = top_down_cut_rod(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Tuple = bottom_up_cut_rod(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : int = naive_cut_rod_recursive(lowerCamelCase_ , lowerCamelCase_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 21
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=64, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=1, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Any = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : str = vocab_size
_lowercase : List[str] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[Any] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Any = num_choices
_lowercase : Tuple = scope
_lowercase : Optional[Any] = q_groups
_lowercase : List[str] = k_groups
_lowercase : Optional[int] = v_groups
_lowercase : List[str] = post_attention_groups
_lowercase : Union[str, Any] = intermediate_groups
_lowercase : int = output_groups
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : Dict = None
_lowercase : int = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = SqueezeBertModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Dict = SqueezeBertForMaskedLM(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForQuestionAnswering(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : int = SqueezeBertForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Any = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.num_labels
_lowercase : List[str] = SqueezeBertForTokenClassification(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = self.num_choices
_lowercase : str = SqueezeBertForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Dict = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Union[str, Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase_ : Optional[int] = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Tuple = False
lowercase_ : List[str] = True
lowercase_ : int = False
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = SqueezeBertModelTester(self)
_lowercase : Dict = ConfigTester(self, config_class=lowerCamelCase, dim=37)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = SqueezeBertModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
_lowercase : Optional[int] = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
_lowercase : List[str] = model(lowerCamelCase)[0]
_lowercase : Union[str, Any] = torch.Size((1, 3))
self.assertEqual(output.shape, lowerCamelCase)
_lowercase : Tuple = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-4))
| 21
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase: str = logging.get_logger(__name__)
_UpperCamelCase: Union[str, Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 'mgp-str'
def __init__( self : Tuple, lowerCAmelCase : str=[32, 128], lowerCAmelCase : List[Any]=4, lowerCAmelCase : Union[str, Any]=3, lowerCAmelCase : Union[str, Any]=27, lowerCAmelCase : Union[str, Any]=38, lowerCAmelCase : Tuple=50257, lowerCAmelCase : Dict=30522, lowerCAmelCase : Optional[int]=768, lowerCAmelCase : Optional[int]=12, lowerCAmelCase : Optional[int]=12, lowerCAmelCase : Union[str, Any]=4.0, lowerCAmelCase : Any=True, lowerCAmelCase : Optional[int]=False, lowerCAmelCase : Optional[int]=1e-5, lowerCAmelCase : List[str]=0.0, lowerCAmelCase : Optional[Any]=0.0, lowerCAmelCase : List[str]=0.0, lowerCAmelCase : Dict=False, lowerCAmelCase : Union[str, Any]=0.02, **lowerCAmelCase : Optional[int], ) -> List[Any]:
super().__init__(**lowerCAmelCase )
lowercase : int = image_size
lowercase : Dict = patch_size
lowercase : List[str] = num_channels
lowercase : Union[str, Any] = max_token_length
lowercase : str = num_character_labels
lowercase : Tuple = num_bpe_labels
lowercase : Tuple = num_wordpiece_labels
lowercase : Optional[Any] = hidden_size
lowercase : Tuple = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Tuple = mlp_ratio
lowercase : Union[str, Any] = distilled
lowercase : List[str] = layer_norm_eps
lowercase : Optional[int] = drop_rate
lowercase : Tuple = qkv_bias
lowercase : int = attn_drop_rate
lowercase : Any = drop_path_rate
lowercase : Optional[Any] = output_aa_attentions
lowercase : Optional[Any] = initializer_range
| 53
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowercase__ ( ) -> Dict:
'''simple docstring'''
lowercase : List[Any] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
lowercase : int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
return image
def lowercase__ ( _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = dct.pop(_UpperCAmelCase )
lowercase : Tuple = val
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase : Optional[int] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowercase : int = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowercase : List[Any] = torch.cat((q_bias, torch.zeros_like(_UpperCAmelCase , requires_grad=_UpperCAmelCase ), v_bias) )
lowercase : Optional[Any] = qkv_bias
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] = 3_64 if 'coco' in model_name else 2_24
lowercase : int = BlipaVisionConfig(image_size=_UpperCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
lowercase : Optional[int] = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=_UpperCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
lowercase : List[str] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=_UpperCAmelCase ).to_dict()
elif "t5-xl" in model_name:
lowercase : int = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
lowercase : int = BlipaConfig(vision_config=_UpperCAmelCase , text_config=_UpperCAmelCase )
return config, image_size
@torch.no_grad()
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowercase : Any = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
lowercase : Any = tokenizer('\n' , add_special_tokens=_UpperCAmelCase ).input_ids[0]
lowercase , lowercase : Union[str, Any] = get_blipa_config(_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase : Any = BlipaForConditionalGeneration(_UpperCAmelCase ).eval()
lowercase : Any = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
lowercase , lowercase : Optional[int] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
lowercase : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
lowercase , lowercase , lowercase : List[str] = load_model_and_preprocess(
name=_UpperCAmelCase , model_type=_UpperCAmelCase , is_eval=_UpperCAmelCase , device=_UpperCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
lowercase : int = original_model.state_dict()
lowercase : str = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase : Dict = state_dict.pop(_UpperCAmelCase )
if key.startswith('Qformer.bert' ):
lowercase : List[Any] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
lowercase : List[Any] = key.replace('self' , 'attention' )
if "opt_proj" in key:
lowercase : Any = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
lowercase : List[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
lowercase : Optional[Any] = key.replace('opt' , 'language' )
if key.startswith('t5' ):
lowercase : Optional[Any] = key.replace('t5' , 'language' )
lowercase : Tuple = val
# read in qv biases
read_in_q_v_bias(_UpperCAmelCase , _UpperCAmelCase )
lowercase , lowercase : str = hf_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert len(_UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
lowercase : List[Any] = load_demo_image()
lowercase : Optional[Any] = vis_processors['eval'](_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
lowercase : str = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
# create processor
lowercase : List[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase )
lowercase : Union[str, Any] = BlipaProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
lowercase : Tuple = processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
original_model.to(_UpperCAmelCase )
hf_model.to(_UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
lowercase : Any = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
lowercase : str = hf_model(_UpperCAmelCase , _UpperCAmelCase ).logits
else:
lowercase : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
lowercase : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
lowercase : Tuple = hf_model(_UpperCAmelCase , _UpperCAmelCase , labels=_UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowercase : str = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=_UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowercase : Any = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=_UpperCAmelCase )
else:
# cast to same type
lowercase : Dict = logits.dtype
assert torch.allclose(original_logits.to(_UpperCAmelCase ) , _UpperCAmelCase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
lowercase : str = ''
lowercase : List[str] = tokenizer(_UpperCAmelCase , return_tensors='pt' ).input_ids.to(_UpperCAmelCase )
lowercase : Any = original_model.generate({'image': original_pixel_values} )
lowercase : Union[str, Any] = hf_model.generate(
_UpperCAmelCase , _UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _UpperCAmelCase )
lowercase : str = input_ids.shape[1]
lowercase : Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_UpperCAmelCase )
lowercase : Optional[int] = [text.strip() for text in output_text]
print('HF generation:' , _UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
_UpperCamelCase: Optional[Any] = argparse.ArgumentParser()
_UpperCamelCase: Dict = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_UpperCamelCase: int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 53
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
|
import random
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = [], [], []
for element in data:
if element < pivot:
less.append(_SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(_SCREAMING_SNAKE_CASE )
else:
equal.append(_SCREAMING_SNAKE_CASE )
return less, equal, greater
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0:
return None
lowerCamelCase : List[Any] = items[random.randint(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 )]
lowerCamelCase : Dict = 0
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
| 48
| 1
|
def lowerCAmelCase( __lowerCamelCase ):
__a = len(__lowerCamelCase )
for i in range(__lowerCamelCase ):
for j in range(i + 1 , __lowerCamelCase ):
if numbers[j] < numbers[i]:
__a , __a = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCamelCase_ : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 197
|
def lowerCAmelCase( __lowerCamelCase ):
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
__a = ''
while len(__lowerCamelCase ) % 3 != 0:
__a = '0' + bin_string
__a = [
bin_string[index : index + 3]
for index in range(len(__lowerCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__a = 0
for index, val in enumerate(__lowerCamelCase ):
oct_val += int(2 ** (2 - index) * int(__lowerCamelCase ) )
oct_string += str(__lowerCamelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 197
| 1
|
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ = {
'Salesforce/codegen-350M-mono': 2_048,
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ['input_ids', 'attention_mask']
UpperCAmelCase__ : Dict = CodeGenTokenizer
def __init__( self: Tuple , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Optional[int]="<|endoftext|>" , UpperCamelCase_: Union[str, Any]="<|endoftext|>" , UpperCamelCase_: Union[str, Any]="<|endoftext|>" , UpperCamelCase_: Any=False , **UpperCamelCase_: Tuple , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("""add_bos_token""" , UpperCamelCase_ ):
__lowerCamelCase = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
__lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space:
__lowerCamelCase = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) )
__lowerCamelCase = add_prefix_space
__lowerCamelCase = pre_tok_class(**UpperCamelCase_ )
__lowerCamelCase = add_prefix_space
def lowerCAmelCase__ ( self: Union[str, Any] , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: List[str] ):
__lowerCamelCase = kwargs.get("""is_split_into_words""" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , *UpperCamelCase_: Any , **UpperCamelCase_: List[str] ):
__lowerCamelCase = kwargs.get("""is_split_into_words""" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
__lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCamelCase_: bool = False , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[List[str]] = None , **UpperCamelCase_: Union[str, Any] , ):
__lowerCamelCase = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any ):
def find_re(UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] ):
__lowerCamelCase = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__lowerCamelCase = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__lowerCamelCase = list(re.finditer("""^print""" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__lowerCamelCase = completion[: prints[1].start()]
__lowerCamelCase = list(re.finditer("""^def""" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__lowerCamelCase = completion[: defs[1].start()]
__lowerCamelCase = 0
__lowerCamelCase = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
| 12
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase : Optional[int] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''input_features''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = num_mel_bins
A : Tuple = do_ceptral_normalize
A : Dict = normalize_means
A : List[Any] = normalize_vars
A : List[str] = True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
A : Dict = x[:input_length].mean(axis=0 )
A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if normalize_vars:
A : str = x[:input_length].std(axis=0 )
A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
A : List[str] = padding_value
# make sure array is in float32
A : Tuple = x.astype(np.floataa )
return x
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]:
"""simple docstring"""
A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A : Tuple = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A : Any = [raw_speech]
# extract fbank features
A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
A : str = BatchFeature({'''input_features''': features} )
A : Union[str, Any] = self.pad(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
# make sure list is in array format
A : List[str] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ):
A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
A : Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A : Dict = (
np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A : List[Any] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE )
if return_tensors is not None:
A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE )
return padded_inputs
| 3
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase__ : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def _A ( self , _A ):
'''simple docstring'''
if self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__SCREAMING_SNAKE_CASE = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_A )
else:
raise ValueError('Unsupported framework' )
return masked_index
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_masked_index(_A )
__SCREAMING_SNAKE_CASE = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _A ( self , _A ):
'''simple docstring'''
if isinstance(_A , _A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_A )
def _A ( self , _A , _A=None , **_A ):
'''simple docstring'''
if return_tensors is None:
__SCREAMING_SNAKE_CASE = self.framework
__SCREAMING_SNAKE_CASE = self.tokenizer(_A , return_tensors=_A )
self.ensure_exactly_one_mask_token(_A )
return model_inputs
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model(**_A )
__SCREAMING_SNAKE_CASE = model_inputs['input_ids']
return model_outputs
def _A ( self , _A , _A=5 , _A=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
__SCREAMING_SNAKE_CASE = target_ids.shape[0]
__SCREAMING_SNAKE_CASE = model_outputs['input_ids'][0]
__SCREAMING_SNAKE_CASE = model_outputs['logits']
if self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__SCREAMING_SNAKE_CASE = outputs.numpy()
__SCREAMING_SNAKE_CASE = outputs[0, masked_index, :]
__SCREAMING_SNAKE_CASE = stable_softmax(_A , axis=-1 )
if target_ids is not None:
__SCREAMING_SNAKE_CASE = tf.gather_nd(tf.squeeze(_A , 0 ) , target_ids.reshape(-1 , 1 ) )
__SCREAMING_SNAKE_CASE = tf.expand_dims(_A , 0 )
__SCREAMING_SNAKE_CASE = tf.math.top_k(_A , k=_A )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = topk.values.numpy(), topk.indices.numpy()
else:
__SCREAMING_SNAKE_CASE = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__SCREAMING_SNAKE_CASE = outputs[0, masked_index, :]
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 )
if target_ids is not None:
__SCREAMING_SNAKE_CASE = probs[..., target_ids]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = probs.topk(_A )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__SCREAMING_SNAKE_CASE = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__SCREAMING_SNAKE_CASE = input_ids.numpy().copy()
if target_ids is not None:
__SCREAMING_SNAKE_CASE = target_ids[p].tolist()
__SCREAMING_SNAKE_CASE = p
# Filter padding out:
__SCREAMING_SNAKE_CASE = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(_A , skip_special_tokens=_A )
__SCREAMING_SNAKE_CASE = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(_A )
result.append(_A )
if single_mask:
return result[0]
return result
def _A ( self , _A , _A=None ):
'''simple docstring'''
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = [targets]
try:
__SCREAMING_SNAKE_CASE = self.tokenizer.get_vocab()
except Exception:
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = []
for target in targets:
__SCREAMING_SNAKE_CASE = vocab.get(_A , _A )
if id_ is None:
__SCREAMING_SNAKE_CASE = self.tokenizer(
_A , add_special_tokens=_A , return_attention_mask=_A , return_token_type_ids=_A , max_length=1 , truncation=_A , )['input_ids']
if len(_A ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it' )
continue
__SCREAMING_SNAKE_CASE = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
__SCREAMING_SNAKE_CASE = list(set(_A ) )
if len(_A ) == 0:
raise ValueError('At least one target must be provided when passed.' )
__SCREAMING_SNAKE_CASE = np.array(_A )
return target_ids
def _A ( self , _A=None , _A=None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if targets is not None:
__SCREAMING_SNAKE_CASE = self.get_target_ids(_A , _A )
__SCREAMING_SNAKE_CASE = target_ids
if top_k is not None:
__SCREAMING_SNAKE_CASE = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , _A , *_A , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().__call__(_A , **_A )
if isinstance(_A , _A ) and len(_A ) == 1:
return outputs[0]
return outputs
| 363
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = '''Wav2Vec2FeatureExtractor'''
UpperCamelCase__ : Union[str, Any] = '''AutoTokenizer'''
def __init__( self , _A , _A ):
'''simple docstring'''
super().__init__(_A , _A )
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
@classmethod
def _A ( cls , _A , **_A ):
'''simple docstring'''
try:
return super().from_pretrained(_A , **_A )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , _A , )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
return cls(feature_extractor=_A , tokenizer=_A )
def __call__( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
__SCREAMING_SNAKE_CASE = kwargs.pop('raw_speech' )
else:
__SCREAMING_SNAKE_CASE = kwargs.pop('audio' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('sampling_rate' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('text' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__SCREAMING_SNAKE_CASE = encodings['input_ids']
return inputs
def _A ( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A )
__SCREAMING_SNAKE_CASE = kwargs.pop('input_features' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('labels' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if input_features is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor.pad(_A , *_A , **_A )
if labels is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer.pad(_A , **_A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__SCREAMING_SNAKE_CASE = labels['input_ids']
return input_features
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@contextmanager
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer
yield
__SCREAMING_SNAKE_CASE = self.feature_extractor
__SCREAMING_SNAKE_CASE = False
| 118
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase : List[str] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
_UpperCAmelCase : List[Any] = get_logger(__name__)
_UpperCAmelCase : Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase :
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowercase ( _SCREAMING_SNAKE_CASE ):
@add_start_docstrings(A_ )
def __call__( self , A_ , A_ , A_ , **A_ ) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
UpperCamelCase = inspect.signature(processor.__call__ ).parameters
if len(A_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
UpperCamelCase = processor(A_ , A_ , A_ , **A_ )
else:
UpperCamelCase = processor(A_ , A_ , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> Tuple:
"""simple docstring"""
if not isinstance(A_ , A_ ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
UpperCamelCase = temperature
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores / self.temperature
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(A_ , A_ ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
UpperCamelCase = top_p
UpperCamelCase = filter_value
UpperCamelCase = min_tokens_to_keep
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , scores.shape[-1] )
UpperCamelCase = jnp.full_like(A_ , self.filter_value )
UpperCamelCase = jax.nn.softmax(A_ , axis=-1 ).cumsum(axis=-1 )
UpperCamelCase = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase = jnp.roll(A_ , 1 )
score_mask |= score_mask.at[:, 0].set(A_ )
# min tokens to keep
UpperCamelCase = score_mask.at[:, : self.min_tokens_to_keep].set(A_ )
UpperCamelCase = jnp.where(A_ , A_ , A_ )
UpperCamelCase = jax.lax.sort_key_val(A_ , A_ )[-1]
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ = -float('Inf' ) , A_ = 1 ) -> List[str]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
UpperCamelCase = max(A_ , A_ )
UpperCamelCase = filter_value
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = scores.shape
UpperCamelCase = jnp.full(batch_size * vocab_size , self.filter_value )
UpperCamelCase = min(self.top_k , scores.shape[-1] ) # Safety check
UpperCamelCase , UpperCamelCase = lax.top_k(A_ , A_ )
UpperCamelCase = jnp.broadcast_to((jnp.arange(A_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
UpperCamelCase = topk_scores.flatten()
UpperCamelCase = topk_indices.flatten() + shift
UpperCamelCase = next_scores_flat.at[topk_indices_flat].set(A_ )
UpperCamelCase = next_scores_flat.reshape(A_ , A_ )
return next_scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = bos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = max_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = jnp.full(scores.shape , -float('inf' ) )
UpperCamelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0 ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(A_ , A_ ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(A_ , A_ ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
UpperCamelCase = min_length
UpperCamelCase = eos_token_id
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
# create boolean flag to decide if min length penalty should be applied
UpperCamelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = list(A_ )
UpperCamelCase = begin_index
def __call__( self , A_ , A_ , A_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , A_ )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
UpperCamelCase = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = dict(A_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
UpperCamelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase = force_token_array.at[index].set(A_ )
UpperCamelCase = jnp.intaa(A_ )
def __call__( self , A_ , A_ , A_ ) -> jnp.ndarray:
"""simple docstring"""
def _force_token(A_ ):
UpperCamelCase = scores.shape[0]
UpperCamelCase = self.force_token_array[generation_idx]
UpperCamelCase = jnp.ones_like(A_ , dtype=scores.dtype ) * -float('inf' )
UpperCamelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
UpperCamelCase = lax.dynamic_update_slice(A_ , A_ , (0, current_token) )
return new_scores
UpperCamelCase = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_ ) , lambda: scores , ) , )
return scores
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_ , A_ ) -> str:
"""simple docstring"""
UpperCamelCase = generate_config.eos_token_id
UpperCamelCase = generate_config.no_timestamps_token_id
UpperCamelCase = generate_config.no_timestamps_token_id + 1
UpperCamelCase = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , 'max_initial_timestamp_index' ):
UpperCamelCase = generate_config.max_initial_timestamp_index
else:
UpperCamelCase = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase = model_config.vocab_size
def __call__( self , A_ , A_ , A_ ) -> Dict:
"""simple docstring"""
# suppress <|notimestamps|> which is handled by without_timestamps
UpperCamelCase = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(A_ , A_ ):
UpperCamelCase = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
UpperCamelCase = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_ )
UpperCamelCase = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
UpperCamelCase = jnp.where(cur_len == self.begin_index , A_ , A_ )
UpperCamelCase = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
UpperCamelCase = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase = jax.nn.log_softmax(A_ , axis=-1 )
def handle_cumulative_probs(A_ , A_ ):
UpperCamelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
UpperCamelCase = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , A_ , )
UpperCamelCase = jax.vmap(A_ )(A_ , A_ )
return scores
| 222
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCAmelCase_ = 256_047
lowerCAmelCase_ = 256_145
@require_sentencepiece
@require_tokenizers
class __A ( a__ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = NllbTokenizer
lowerCAmelCase : Tuple = NllbTokenizerFast
lowerCAmelCase : Any = True
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : Any = {}
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : List[Any] = NllbTokenizer(_lowerCamelCase ,keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : int = NllbTokenizer(_lowerCamelCase ,keep_accents=_lowerCamelCase )
lowercase__ : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
lowercase__ : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
lowercase__ : Any = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowercase__ : int = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
lowercase__ : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
lowercase__ : List[str] = self.tokenizer_class.from_pretrained(_lowerCamelCase ,**_lowerCamelCase )
lowercase__ : Optional[Any] = tempfile.mkdtemp()
lowercase__ : Any = tokenizer_r.save_pretrained(_lowerCamelCase )
lowercase__ : List[str] = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase__ : int = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowerCamelCase ,_lowerCamelCase )
# Checks everything loads correctly in the same way
lowercase__ : Tuple = tokenizer_r.from_pretrained(_lowerCamelCase )
lowercase__ : List[Any] = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase ,_lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
lowercase__ : List[Any] = tempfile.mkdtemp()
lowercase__ : Optional[Any] = tokenizer_r.save_pretrained(_lowerCamelCase ,legacy_format=_lowerCamelCase )
lowercase__ : Dict = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCamelCase ,_lowerCamelCase )
# Checks everything loads correctly in the same way
lowercase__ : List[Any] = tokenizer_r.from_pretrained(_lowerCamelCase )
lowercase__ : Optional[Any] = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase ,_lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
lowercase__ : Dict = tempfile.mkdtemp()
lowercase__ : List[Any] = tokenizer_r.save_pretrained(_lowerCamelCase ,legacy_format=_lowerCamelCase )
lowercase__ : Dict = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase__ : Any = tokenizer_r.from_pretrained(_lowerCamelCase )
lowercase__ : int = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase ,_lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@require_torch
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
if not self.test_seqaseq:
return
lowercase__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
lowercase__ : Dict = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
lowercase__ : Optional[Any] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
lowercase__ : Optional[Any] = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCamelCase ,tgt_texts=_lowerCamelCase ,max_length=3 ,max_target_length=10 ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' ,)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,10 )
# max_target_length will default to max_length if not specified
lowercase__ : List[str] = tokenizer.prepare_seqaseq_batch(
_lowerCamelCase ,tgt_texts=_lowerCamelCase ,max_length=3 ,return_tensors='''pt''' )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,3 )
lowercase__ : Optional[int] = tokenizer.prepare_seqaseq_batch(
src_texts=_lowerCamelCase ,max_length=3 ,max_target_length=10 ,return_tensors='''pt''' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 )
self.assertNotIn('''decoder_input_ids''' ,_lowerCamelCase )
@unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : List[Any] = [AddedToken('''<special>''' ,lstrip=_lowerCamelCase )]
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase )
lowercase__ : int = tokenizer_r.encode('''Hey this is a <special> token''' )
lowercase__ : Tuple = tokenizer_r.encode('''<special>''' ,add_special_tokens=_lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowercase__ : Dict = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase ,)
lowercase__ : int = self.tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,**_lowerCamelCase )
lowercase__ : Union[str, Any] = tokenizer_p.encode('''Hey this is a <special> token''' )
lowercase__ : Optional[Any] = tokenizer_cr.encode('''Hey this is a <special> token''' )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = 'facebook/nllb-200-distilled-600M'
lowerCAmelCase : List[str] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCAmelCase : int = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCAmelCase : Dict = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def UpperCAmelCase ( cls : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang='''eng_Latn''' ,tgt_lang='''ron_Latn''' )
lowercase__ : str = 1
return cls
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] ,256_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] ,256_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] ,256_057 )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCamelCase )
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
self.assertIn(_lowerCamelCase ,self.tokenizer.all_special_ids )
# fmt: off
lowercase__ : Optional[int] = [RO_CODE, 4_254, 98_068, 112_923, 39_072, 3_909, 713, 102_767, 26, 17_314, 35_642, 14_683, 33_118, 2_022, 66_987, 2, 256_047]
# fmt: on
lowercase__ : List[str] = self.tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
lowercase__ : List[Any] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token ,_lowerCamelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Any = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] ,_lowerCamelCase )
lowercase__ : Tuple = 10
lowercase__ : Any = self.tokenizer(_lowerCamelCase ,max_length=_lowerCamelCase ,truncation=_lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] ,2 )
self.assertEqual(ids[0] ,_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,_lowerCamelCase )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) ,[256_203, 3] )
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = tempfile.mkdtemp()
lowercase__ : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCamelCase )
lowercase__ : Any = NllbTokenizer.from_pretrained(_lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_lowerCamelCase )
@require_torch
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ : Optional[int] = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors='''pt''' ,)
lowercase__ : List[Any] = shift_tokens_right(
batch['''labels'''] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['''ron_Latn'''] )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 15) ,batch.input_ids.shape )
self.assertEqual((2, 15) ,batch.attention_mask.shape )
lowercase__ : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.tokenizer(self.src_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=3 ,return_tensors='''pt''' )
lowercase__ : int = self.tokenizer(
text_target=self.tgt_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=10 ,return_tensors='''pt''' )
lowercase__ : Union[str, Any] = targets['''input_ids''']
lowercase__ : Union[str, Any] = shift_tokens_right(
_lowerCamelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : int = self.tokenizer._build_translation_inputs(
'''A test''' ,return_tensors='''pt''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
nested_simplify(_lowerCamelCase ) ,{
# A, test, EOS, en_XX
'''input_ids''': [[256_047, 70, 7_356, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 256_057,
} ,)
@require_torch
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowercase__ : Dict = True
lowercase__ : List[Any] = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids ,[16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2, 256_047] )
lowercase__ : Any = False
lowercase__ : List[Any] = self.tokenizer(
'''UN Chief says there is no military solution in Syria''' ,src_lang='''eng_Latn''' ,tgt_lang='''fra_Latn''' )
self.assertEqual(
inputs.input_ids ,[256_047, 16_297, 134_408, 25_653, 6_370, 248, 254, 103_929, 94_995, 108, 49_486, 2] )
| 369
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
lowercase__ : int = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase__ : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : Dict = [3, 3, 3, 3]
lowercase__ : str = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : List[str] = [4, 4, 4, 4]
lowercase__ : Any = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : List[str] = [3, 3, 3, 3]
else:
lowercase__ : Optional[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[int] = 96
elif "small" in model_name:
lowercase__ : Union[str, Any] = 96
elif "base" in model_name:
lowercase__ : Tuple = 1_28
elif "large" in model_name:
lowercase__ : Any = 1_92
elif "xlarge" in model_name:
lowercase__ : Any = 2_56
elif "huge" in model_name:
lowercase__ : Union[str, Any] = 3_52
# set label information
lowercase__ : List[Any] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase__ : Optional[int] = '''imagenet-22k-id2label.json'''
else:
lowercase__ : Optional[Any] = '''imagenet-1k-id2label.json'''
lowercase__ : Dict = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ : Union[str, Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase__ : int = FocalNetConfig(
embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , )
return config
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
if "patch_embed.proj" in name:
lowercase__ : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ : Dict = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ : Tuple = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ : Union[str, Any] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Dict = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Dict = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ : Dict = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ : Dict = '''layernorm.bias'''
if "head" in name:
lowercase__ : Dict = name.replace('''head''' , '''classifier''' )
else:
lowercase__ : List[Any] = '''focalnet.''' + name
return name
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False ) -> List[str]:
# fmt: off
lowercase__ : Any = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowercase__ : Optional[int] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , __lowerCamelCase )
lowercase__ : str = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowercase__ : int = state_dict.pop(__lowerCamelCase )
lowercase__ : Any = val
lowercase__ : List[Any] = get_focalnet_config(__lowerCamelCase )
lowercase__ : Optional[int] = FocalNetForImageClassification(__lowerCamelCase )
model.eval()
# load state dict
model.load_state_dict(__lowerCamelCase )
# verify conversion
lowercase__ : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ : int = BitImageProcessor(
do_resize=__lowerCamelCase , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=2_24 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , )
lowercase__ : str = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
lowercase__ : List[str] = processor(images=__lowerCamelCase , return_tensors='''pt''' )
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase__ : Optional[Any] = image_transforms(__lowerCamelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 )
lowercase__ : Optional[Any] = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
lowercase__ : Optional[int] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Dict = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
lowercase__ : List[str] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
lowercase__ : List[str] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
| 0
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = len(SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowerCAmelCase = i + 1
else:
lowerCAmelCase = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
| 46
|
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(SCREAMING_SNAKE_CASE ):
return ext
raise Exception(
F'Unable to determine file format from file extension {path}. '
F'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
lowerCAmelCase = PipelineDataFormat.from_str(
format=SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase ) -> Union[str, Any]:
lowerCAmelCase = nlp
lowerCAmelCase = reader
@staticmethod
def _snake_case ( lowercase ) -> Optional[int]:
lowerCAmelCase = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=lowercase , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=lowercase , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=lowercase , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=lowercase , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=lowercase , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=lowercase , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=lowercase , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=lowercase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase , lowerCAmelCase = self._nlp, []
for entry in self._reader:
lowerCAmelCase = nlp(**lowercase ) if self._reader.is_multi_columns else nlp(lowercase )
if isinstance(lowercase , lowercase ):
outputs.append(lowercase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase = self._reader.save_binary(lowercase )
logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(lowercase )
| 46
| 1
|
import math
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float ):
return math.pow(__a , 2 ) - a
def SCREAMING_SNAKE_CASE ( snake_case_ : float ):
return 2 * x
def SCREAMING_SNAKE_CASE ( snake_case_ : float ):
snake_case__ : Dict = 2.0
while start <= a:
snake_case__ : int = math.pow(__a , 2 )
return start
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : int = 9999 , snake_case_ : float = 0.00_00_00_00_00_00_01 ):
if a < 0:
raise ValueError("math domain error" )
snake_case__ : List[str] = get_initial_point(__a )
for _ in range(__a ):
snake_case__ : Tuple = value
snake_case__ : Optional[int] = value - fx(__a , __a ) / fx_derivative(__a )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 350
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : List[str] ):
torch.manual_seed(0 )
snake_case__ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
snake_case__ : int = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ : Union[str, Any] = CLIPTextModel(__A )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case__ : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , __A : int , __A : Any=0 ):
snake_case__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" )
if str(__A ).startswith("mps" ):
snake_case__ : List[Any] = torch.manual_seed(__A )
else:
snake_case__ : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
snake_case__ : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs(__A )
snake_case__ : List[str] = sd_pipe(**__A ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : List[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : List[Any] = "french fries"
snake_case__ : str = sd_pipe(**__A , negative_prompt=__A )
snake_case__ : Any = output.images
snake_case__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Union[str, Any] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Any = self.get_dummy_inputs(__A )
snake_case__ : Tuple = [inputs["prompt"]] * 2
snake_case__ : Any = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : List[str] = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
snake_case__ : Union[str, Any] = image / 2 + 0.5
snake_case__ : str = image.permute(0 , 3 , 1 , 2 )
snake_case__ : int = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : str = sd_pipe(**__A ).images
snake_case__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case__ : int = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : Dict = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : Optional[Any] = sd_pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = [round(__A , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(__A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : str = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = self.get_dummy_components()
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : int = VaeImageProcessor(do_resize=__A , do_normalize=__A )
snake_case__ : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Dict = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0]
snake_case__ : int = components["vae"]
snake_case__ : Union[str, Any] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : str = pipe(**__A )[0]
snake_case__ : Dict = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : str , __A : Dict=0 ):
snake_case__ : Optional[int] = torch.manual_seed(__A )
snake_case__ : Tuple = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case__ : Optional[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : str ):
snake_case__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : List[str] = self.get_inputs()
snake_case__ : Any = pipe(**__A ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Dict ):
snake_case__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[Any] = 0
def callback_fn(__A : int , __A : int , __A : torch.FloatTensor ) -> None:
snake_case__ : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : Optional[int] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : Any = latents[0, -3:, -3:, -1]
snake_case__ : Dict = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case__ : Any = False
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Optional[Any] = self.get_inputs()
pipe(**__A , callback=__A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : Tuple = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : Dict = self.get_inputs()
snake_case__ : List[Any] = pipe(**__A )
snake_case__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _lowercase ( self : Tuple ):
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Union[str, Any] = inputs["image"].resize((5_0_4, 5_0_4) )
snake_case__ : Optional[Any] = "timbrooks/instruct-pix2pix"
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = pipe(**__A )
snake_case__ : Tuple = output.images[0]
snake_case__ : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case__ : int = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 286
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__a = logging.get_logger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Tuple , *snake_case_ : List[Any] , **snake_case_ : Optional[int] ):
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
| 35
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any="attention" ):
UpperCamelCase :str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
UpperCamelCase :Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCamelCase :Optional[int] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
UpperCamelCase :List[Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCamelCase :Union[str, Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
UpperCamelCase :Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCamelCase :str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
UpperCamelCase :str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=False ):
if split_mlp_wi:
UpperCamelCase :List[Any] = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
UpperCamelCase :int = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
UpperCamelCase :str = (wi_a, wi_a)
else:
UpperCamelCase :Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
UpperCamelCase :Optional[int] = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def _A ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def _A ( SCREAMING_SNAKE_CASE__ : dict , *, SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : bool = False ):
UpperCamelCase :Tuple = traverse_util.flatten_dict(variables['''target'''] )
UpperCamelCase :List[Any] = {'''/'''.join(SCREAMING_SNAKE_CASE__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCamelCase :int = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Optional[int] = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase :int = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :str = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[str] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :Dict = k.T
UpperCamelCase :Optional[Any] = o.T
UpperCamelCase :int = q.T
UpperCamelCase :Any = v.T
# Block i, layer 1 (MLP).
UpperCamelCase :Tuple = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Any = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :List[Any] = wi[0].T
UpperCamelCase :Tuple = wi[1].T
else:
UpperCamelCase :Optional[Any] = wi.T
UpperCamelCase :Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :List[str] = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''encoder''' ).T
UpperCamelCase :Optional[Any] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
UpperCamelCase :str = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''encoder''' ).T
UpperCamelCase :Any = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
UpperCamelCase :Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''self_attention''' )
UpperCamelCase :str = layer_norm
UpperCamelCase :int = k.T
UpperCamelCase :Optional[int] = o.T
UpperCamelCase :Tuple = q.T
UpperCamelCase :List[str] = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase :str = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase :List[Any] = tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCamelCase :Tuple = layer_norm
UpperCamelCase :Optional[Any] = k.T
UpperCamelCase :List[str] = o.T
UpperCamelCase :List[str] = q.T
UpperCamelCase :str = v.T
# Block i, layer 2 (MLP).
UpperCamelCase :List[str] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCamelCase , UpperCamelCase :Optional[int] = tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' , SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Tuple = layer_norm
if split_mlp_wi:
UpperCamelCase :List[str] = wi[0].T
UpperCamelCase :str = wi[1].T
else:
UpperCamelCase :Dict = wi.T
UpperCamelCase :Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCamelCase :Tuple = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''decoder''' ).T
UpperCamelCase :Union[str, Any] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase :Union[str, Any] = old['''decoder/logits_dense/kernel'''].T
return new
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : bool ):
UpperCamelCase :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCamelCase :Dict = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCamelCase :Dict = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCamelCase :List[Any] = state_dict['''shared.weight''']
return state_dict
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
UpperCamelCase :Dict = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
UpperCamelCase :str = convert_tax_to_pytorch(
SCREAMING_SNAKE_CASE__ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE__ , scalable_attention=SCREAMING_SNAKE_CASE__ )
UpperCamelCase :Dict = make_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ):
UpperCamelCase :Any = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCamelCase :List[str] = UMTaEncoderModel(SCREAMING_SNAKE_CASE__ )
else:
UpperCamelCase :Any = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Verify that we can load the checkpoint.
model.from_pretrained(SCREAMING_SNAKE_CASE__ )
print('''Done''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 259
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return 0
elif n == 2:
return 1
else:
lowercase_ = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = 0
lowercase_ = 2
while digits < n:
index += 1
lowercase_ = len(str(fibonacci(__lowerCAmelCase ) ) )
return index
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 10_00 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(__lowerCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 313
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def _SCREAMING_SNAKE_CASE () -> Generator[int, None, None]:
'''simple docstring'''
lowercase_ = {}
lowercase_ = 2
while True:
lowercase_ = factor_map.pop(__lowerCAmelCase , __lowerCAmelCase )
if factor:
lowercase_ = factor + prime
while x in factor_map:
x += factor
lowercase_ = factor
else:
lowercase_ = prime
yield prime
prime += 1
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = 1E10 ) -> int:
'''simple docstring'''
lowercase_ = sieve()
lowercase_ = 1
while True:
lowercase_ = next(__lowerCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__lowerCAmelCase )
n += 2
if __name__ == "__main__":
print(solution())
| 313
| 1
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCAmelCase : List[Any] = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any=None ) -> int:
require_version(deps[pkg] , _lowerCAmelCase )
| 300
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 174
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase: Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase: Dict = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase: str = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCAmelCase: str = {
"""camembert-base""": 512,
}
UpperCAmelCase: List[Any] = """▁"""
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="</s>" ,UpperCAmelCase_="<s>" ,UpperCAmelCase_="<unk>" ,UpperCAmelCase_="<pad>" ,UpperCAmelCase_="<mask>" ,UpperCAmelCase_=["<s>NOTUSED", "</s>NOTUSED"] ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
# Mask token behave like a normal word, i.e. include the space before it
_lowercase : Tuple = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else mask_token
_lowercase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ ,eos_token=UpperCAmelCase_ ,unk_token=UpperCAmelCase_ ,sep_token=UpperCAmelCase_ ,cls_token=UpperCAmelCase_ ,pad_token=UpperCAmelCase_ ,mask_token=UpperCAmelCase_ ,additional_special_tokens=UpperCAmelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,**UpperCAmelCase_ ,)
_lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
_lowercase : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_lowercase : int = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_lowercase : Optional[Any] = len(self.fairseq_tokens_to_ids )
_lowercase : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_lowercase : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : Optional[int] = [self.cls_token_id]
_lowercase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ ,token_ids_a=UpperCAmelCase_ ,already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Optional[int] = [self.sep_token_id]
_lowercase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase__ ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def lowerCamelCase__ ( self ):
_lowercase : Tuple = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
return self.sp_model.encode(UpperCAmelCase_ ,out_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(UpperCAmelCase_ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[Any] = []
_lowercase : List[Any] = """"""
_lowercase : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
_lowercase : Any = True
_lowercase : Optional[int] = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
_lowercase : int = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def __getstate__( self ):
_lowercase : Optional[Any] = self.__dict__.copy()
_lowercase : List[str] = None
return state
def __setstate__( self ,UpperCAmelCase_ ):
_lowercase : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
_lowercase : Union[str, Any] = {}
_lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowercase : Optional[int] = os.path.join(
UpperCAmelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ ,"""wb""" ) as fi:
_lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 336
|
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
UpperCAmelCase: Optional[Any] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
UpperCAmelCase: Tuple = [0, 25, 50]
UpperCAmelCase: List[Any] = [25, 50, 75]
UpperCAmelCase: Optional[int] = fuzz.membership.trimf(X, abca)
UpperCAmelCase: Any = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
UpperCAmelCase: List[Any] = np.ones(75)
UpperCAmelCase: Any = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
UpperCAmelCase: str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
UpperCAmelCase: List[Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
UpperCAmelCase: Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
UpperCAmelCase: int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
UpperCAmelCase: int = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
UpperCAmelCase: List[Any] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
UpperCAmelCase: int = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 336
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[str] = "dandelin/vilt-b32-finetuned-vqa"
__snake_case : List[str] = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__snake_case : Tuple = "image_qa"
__snake_case : Optional[Any] = AutoProcessor
__snake_case : Optional[Any] = AutoModelForVisualQuestionAnswering
__snake_case : Any = ["image", "text"]
__snake_case : List[str] = ["text"]
def __init__( self: str , *UpperCAmelCase_: Optional[Any] , **UpperCAmelCase_: Optional[int] ):
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: "Image" , UpperCAmelCase_: str ):
'''simple docstring'''
return self.pre_processor(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[Any] ):
'''simple docstring'''
with torch.no_grad():
return self.model(**UpperCAmelCase_ ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 306
|
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = len(snake_case__ )
_SCREAMING_SNAKE_CASE = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
_SCREAMING_SNAKE_CASE = y_points[i]
for i in range(2 ,snake_case__ ):
for j in range(snake_case__ ,snake_case__ ):
_SCREAMING_SNAKE_CASE = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 1
|
import doctest
from collections import deque
import numpy as np
class __lowerCamelCase :
def __init__( self ) -> None:
snake_case_ = [2, 1, 2, -1]
snake_case_ = [1, 2, 3, 4]
def lowerCAmelCase_ ( self ) -> list[float]:
snake_case_ = len(self.first_signal )
snake_case_ = len(self.second_signal )
snake_case_ = max(lowerCamelCase , lowerCamelCase )
# create a zero matrix of max_length x max_length
snake_case_ = [[0] * max_length for i in range(lowerCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowerCamelCase ):
snake_case_ = deque(self.second_signal )
rotated_signal.rotate(lowerCamelCase )
for j, item in enumerate(lowerCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
snake_case_ = np.matmul(np.transpose(lowerCamelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowerCamelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 34
|
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
lowerCamelCase_ = get_logger(__name__)
class __lowerCamelCase ( enum.Enum ):
lowerCamelCase_ : Dict = 'all_checks'
lowerCamelCase_ : Any = 'basic_checks'
lowerCamelCase_ : Any = 'no_checks'
class __lowerCamelCase ( __snake_case ):
pass
class __lowerCamelCase ( __snake_case ):
pass
class __lowerCamelCase ( __snake_case ):
pass
class __lowerCamelCase ( __snake_case ):
pass
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_=None ) -> List[str]:
'''simple docstring'''
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(lowercase_ ) - set(lowercase_ ) ) )
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(lowercase_ ) - set(lowercase_ ) ) )
snake_case_ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
snake_case_ = """ for """ + verification_name if verification_name is not None else """"""
if len(lowercase_ ) > 0:
raise NonMatchingChecksumError(
f'''Checksums didn\'t match{for_verification_name}:\n'''
f'''{bad_urls}\n'''
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class __lowerCamelCase ( __snake_case ):
pass
class __lowerCamelCase ( __snake_case ):
pass
class __lowerCamelCase ( __snake_case ):
pass
class __lowerCamelCase ( __snake_case ):
pass
def UpperCamelCase( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise ExpectedMoreSplits(str(set(lowercase_ ) - set(lowercase_ ) ) )
if len(set(lowercase_ ) - set(lowercase_ ) ) > 0:
raise UnexpectedSplits(str(set(lowercase_ ) - set(lowercase_ ) ) )
snake_case_ = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(lowercase_ ) > 0:
raise NonMatchingSplitsSizesError(str(lowercase_ ) )
logger.info("""All the splits matched successfully.""" )
def UpperCamelCase( lowercase_ , lowercase_ = True ) -> dict:
'''simple docstring'''
if record_checksum:
snake_case_ = shaaaa()
with open(lowercase_ , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"""""" ):
m.update(lowercase_ )
snake_case_ = m.hexdigest()
else:
snake_case_ = None
return {"num_bytes": os.path.getsize(lowercase_ ), "checksum": checksum}
def UpperCamelCase( lowercase_ ) -> List[str]:
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 34
| 1
|
import re
import string
import numpy as np
import datasets
lowercase = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
lowercase = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
lowercase = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def _UpperCamelCase ( self , a , a , a=None , a=False , a=False , a=False , ) -> Optional[int]:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
snake_case_ = np.array([re.sub(_A , '' , _A ) for x in predictions] )
snake_case_ = np.array([re.sub(_A , '' , _A ) for x in references] )
else:
snake_case_ = np.asarray(_A )
snake_case_ = np.asarray(_A )
if ignore_case:
snake_case_ = np.char.lower(_A )
snake_case_ = np.char.lower(_A )
if ignore_punctuation:
snake_case_ = string.punctuation.maketrans('' , '' , string.punctuation )
snake_case_ = np.char.translate(_A , table=_A )
snake_case_ = np.char.translate(_A , table=_A )
if ignore_numbers:
snake_case_ = string.digits.maketrans('' , '' , string.digits )
snake_case_ = np.char.translate(_A , table=_A )
snake_case_ = np.char.translate(_A , table=_A )
snake_case_ = predictions == references
return {"exact_match": np.mean(_A ) * 1_00}
| 178
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def _SCREAMING_SNAKE_CASE ( a , a , a=8 ) -> Tuple:
__A : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__A : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _SCREAMING_SNAKE_CASE ( a , a=5_12 , a=5_12 ) -> int:
__A : Optional[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
__A : Union[str, Any] = np.array(pil_image.convert('RGB' ) )
__A : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
__A : int = np.transpose(a , [2, 0, 1] )
__A : Tuple = torch.from_numpy(a ).unsqueeze(0 )
return image
class _A( snake_case__ ):
"""simple docstring"""
def __init__( self , _A , _A , _A , ):
super().__init__()
self.register_modules(
unet=_A , scheduler=_A , movq=_A , )
__A : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self , _A , _A , _A ):
# get the original timestep using init_timestep
__A : Optional[int] = min(int(num_inference_steps * strength ) , _A )
__A : Dict = max(num_inference_steps - init_timestep , 0 )
__A : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A=None ):
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}""" )
__A : Union[str, Any] = image.to(device=_A , dtype=_A )
__A : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__A : int = image
else:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(_A , _A ):
__A : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
__A : str = torch.cat(_A , dim=0 )
else:
__A : List[str] = self.movq.encode(_A ).latent_dist.sample(_A )
__A : Tuple = self.movq.config.scaling_factor * init_latents
__A : Optional[int] = torch.cat([init_latents] , dim=0 )
__A : Union[str, Any] = init_latents.shape
__A : List[str] = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
__A : Optional[Any] = self.scheduler.add_noise(_A , _A , _A )
__A : Optional[int] = init_latents
return latents
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__A : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
__A : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_A , _A )
def UpperCAmelCase_ ( self , _A=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__A : List[Any] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__A : int = None
for cpu_offloaded_model in [self.unet, self.movq]:
__A , __A : Optional[int] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A )
# We'll offload the last model manually.
__A : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_A , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_A )
def __call__( self , _A , _A , _A , _A = 512 , _A = 512 , _A = 100 , _A = 4.0 , _A = 0.3 , _A = 1 , _A = None , _A = "pil" , _A = True , ):
__A : List[Any] = self._execution_device
__A : Optional[Any] = guidance_scale > 1.0
if isinstance(_A , _A ):
__A : Optional[Any] = torch.cat(_A , dim=0 )
__A : Tuple = image_embeds.shape[0]
if isinstance(_A , _A ):
__A : List[Any] = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__A : Union[str, Any] = image_embeds.repeat_interleave(_A , dim=0 )
__A : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__A : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_A )
if not isinstance(_A , _A ):
__A : List[Any] = [image]
if not all(isinstance(_A , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(_A ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
__A : Dict = torch.cat([prepare_image(_A , _A , _A ) for i in image] , dim=0 )
__A : Any = image.to(dtype=image_embeds.dtype , device=_A )
__A : Tuple = self.movq.encode(_A )['latents']
__A : int = latents.repeat_interleave(_A , dim=0 )
self.scheduler.set_timesteps(_A , device=_A )
__A , __A : int = self.get_timesteps(_A , _A , _A )
__A : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
__A , __A : Any = downscale_height_and_width(_A , _A , self.movq_scale_factor )
__A : Tuple = self.prepare_latents(
_A , _A , _A , _A , image_embeds.dtype , _A , _A )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__A : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A : Dict = {'image_embeds': image_embeds}
__A : List[str] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__A , __A : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__A , __A : Optional[Any] = noise_pred.chunk(2 )
__A , __A : List[str] = variance_pred.chunk(2 )
__A : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__A : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__A , __A : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__A : List[str] = self.scheduler.step(
_A , _A , _A , generator=_A , )[0]
# post-processing
__A : List[Any] = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__A : List[str] = image * 0.5 + 0.5
__A : List[str] = image.clamp(0 , 1 )
__A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__A : Any = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
| 280
| 0
|
def lowerCamelCase__ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
'''simple docstring'''
while second != 0:
_snake_case = first & second
first ^= second
_snake_case = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = int(input("""Enter the first number: """).strip())
UpperCAmelCase_ = int(input("""Enter the second number: """).strip())
print(F"{add(first, second) = }")
| 295
|
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
_snake_case = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 295
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__a = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
requires_backends(self , '''decord''' )
self.check_model_type(SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None ) -> Union[str, Any]:
lowercase_ = {}
if frame_sampling_rate is not None:
lowercase_ = frame_sampling_rate
if num_frames is not None:
lowercase_ = num_frames
lowercase_ = {}
if top_k is not None:
lowercase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]:
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 ) -> int:
if num_frames is None:
lowercase_ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
lowercase_ = BytesIO(requests.get(SCREAMING_SNAKE_CASE_ ).content )
lowercase_ = VideoReader(SCREAMING_SNAKE_CASE_ )
videoreader.seek(0 )
lowercase_ = 0
lowercase_ = num_frames * frame_sampling_rate - 1
lowercase_ = np.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num=SCREAMING_SNAKE_CASE_ , dtype=np.intaa )
lowercase_ = videoreader.get_batch(SCREAMING_SNAKE_CASE_ ).asnumpy()
lowercase_ = list(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework )
return model_inputs
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
lowercase_ = self.model(**SCREAMING_SNAKE_CASE_ )
return model_outputs
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=5 ) -> Optional[Any]:
if top_k > self.model.config.num_labels:
lowercase_ = self.model.config.num_labels
if self.framework == "pt":
lowercase_ = model_outputs.logits.softmax(-1 )[0]
lowercase_ , lowercase_ = probs.topk(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowercase_ = scores.tolist()
lowercase_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
| 30
|
'''simple docstring'''
from __future__ import annotations
class UpperCAmelCase :
def __init__( self :Optional[int] , lowercase_ :int )-> None:
A__ = order
# a_{0} ... a_{k}
A__ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A__ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A__ = [0.0] * self.order
# y[n-1] ... y[n-k]
A__ = [0.0] * self.order
def UpperCAmelCase_ ( self :List[str] , lowercase_ :list[float] , lowercase_ :list[float] )-> None:
if len(lowercase_ ) < self.order:
A__ = [1.0, *a_coeffs]
if len(lowercase_ ) != self.order + 1:
A__ = (
F"Expected a_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(lowercase_ )}"
)
raise ValueError(lowercase_ )
if len(lowercase_ ) != self.order + 1:
A__ = (
F"Expected b_coeffs to have {self.order + 1} elements "
F"for {self.order}-order filter, got {len(lowercase_ )}"
)
raise ValueError(lowercase_ )
A__ = a_coeffs
A__ = b_coeffs
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :float )-> float:
A__ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A__ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A__ = self.input_history[:-1]
A__ = self.output_history[:-1]
A__ = sample
A__ = result
return result
| 237
| 0
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class a_ ( unittest.TestCase ):
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = 10
def __a ( self :int) -> Optional[int]:
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_lowercase , self.block_size , 0) , _lowercase)
def __a ( self :List[Any]) -> Any:
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowercase , self.block_size , 0) , _lowercase)
def __a ( self :List[str]) -> Tuple:
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowercase , self.block_size , 0) , _lowercase)
def __a ( self :List[Any]) -> List[Any]:
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_lowercase)
self.assertEqual(_lowercase , [])
def __a ( self :int) -> str:
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_lowercase)
self.assertEqual(_lowercase , [])
self.assertEqual(_lowercase , [])
def __a ( self :Tuple) -> Tuple:
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_lowercase)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_lowercase , _lowercase)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_lowercase , _lowercase)
def __a ( self :Dict) -> str:
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_lowercase , 0).numpy() , expected.numpy())
def __a ( self :int) -> Optional[int]:
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_lowercase , 23).numpy() , expected.numpy())
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_lowercase , 1).numpy() , expected.numpy())
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_lowercase , _lowercase)
np.testing.assert_array_equal(_lowercase , _lowercase)
| 344
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.intaa).astype(np.floataa) / 32767
else:
UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.floataa) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
| 344
| 1
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__lowerCAmelCase = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
'''simple docstring'''
assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def _lowerCamelCase ( ):
'''simple docstring'''
assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
__lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
__lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
| 57
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
super().__init__(
eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
__lowerCAmelCase = extra_ids
__lowerCAmelCase = 2**8 # utf is 8 bits
# define special tokens dict
__lowerCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__lowerCAmelCase = len(self.special_tokens_encoder )
__lowerCAmelCase = len(__a )
for i, token in enumerate(__a ):
__lowerCAmelCase = self.vocab_size + i - n
__lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a )) + [1]
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def snake_case ( self , __a ):
if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = self._add_eos_if_not_present(__a )
if token_ids_a is None:
return token_ids_a
else:
__lowerCAmelCase = self._add_eos_if_not_present(__a )
return token_ids_a + token_ids_a
def snake_case ( self , __a ):
__lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
return tokens
def snake_case ( self , __a ):
if token in self.special_tokens_encoder:
__lowerCAmelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__lowerCAmelCase = self.added_tokens_encoder[token]
elif len(__a ) != 1:
__lowerCAmelCase = self.unk_token_id
else:
__lowerCAmelCase = ord(__a ) + self._num_special_tokens
return token_id
def snake_case ( self , __a ):
if index in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[index]
else:
__lowerCAmelCase = chr(index - self._num_special_tokens )
return token
def snake_case ( self , __a ):
__lowerCAmelCase = B""
for token in tokens:
if token in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
else:
__lowerCAmelCase = bytes([ord(__a )] )
bstring += tok_string
__lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
return string
def snake_case ( self , __a , __a = None ):
return ()
| 57
| 1
|
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ):
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""", _UpperCAmelCase, )
super().__init__(*_UpperCAmelCase, **_UpperCAmelCase )
| 352
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Any = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : List[str] = "xlm"
__lowerCamelCase : Tuple = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self, lowerCamelCase__=3_0145, lowerCamelCase__=2048, lowerCamelCase__=12, lowerCamelCase__=16, lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=1, lowerCamelCase__=True, lowerCamelCase__=512, lowerCamelCase__=2048**-0.5, lowerCamelCase__=1e-12, lowerCamelCase__=0.02, lowerCamelCase__=0, lowerCamelCase__=1, lowerCamelCase__=2, lowerCamelCase__=3, lowerCamelCase__=5, lowerCamelCase__=True, lowerCamelCase__="first", lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__=0.1, lowerCamelCase__=5, lowerCamelCase__=5, lowerCamelCase__=0, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__=0, **lowerCamelCase__, ):
A : Dict = vocab_size
A : int = emb_dim
A : str = n_layers
A : Union[str, Any] = n_heads
A : Optional[int] = dropout
A : Union[str, Any] = attention_dropout
A : Optional[Any] = gelu_activation
A : Dict = sinusoidal_embeddings
A : int = causal
A : Optional[Any] = asm
A : Any = n_langs
A : List[str] = use_lang_emb
A : Union[str, Any] = layer_norm_eps
A : str = bos_index
A : int = eos_index
A : Tuple = pad_index
A : str = unk_index
A : Optional[Any] = mask_index
A : Union[str, Any] = is_encoder
A : Tuple = max_position_embeddings
A : List[str] = embed_init_std
A : Tuple = init_std
A : Tuple = summary_type
A : int = summary_use_proj
A : List[Any] = summary_activation
A : Optional[Any] = summary_proj_to_labels
A : Optional[Any] = summary_first_dropout
A : Optional[int] = start_n_top
A : Optional[Any] = end_n_top
A : List[str] = mask_token_id
A : Tuple = lang_id
if "n_words" in kwargs:
A : List[str] = kwargs["""n_words"""]
super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, **lowerCamelCase__ )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
if self.task == "multiple-choice":
A : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 115
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple=1_3 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Tuple=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[str]=9_9 , UpperCAmelCase__ : List[Any]=3_2 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : str=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : List[str]=5_1_2 , UpperCAmelCase__ : Union[str, Any]=1_6 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[str]=None , ) -> Tuple:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : str ) -> Tuple:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = LlamaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = LlamaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , ) -> str:
__SCREAMING_SNAKE_CASE = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , ) -> Any:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = LlamaForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["hidden_states"][0]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
snake_case__ : str = (LlamaForCausalLM,) if is_torch_available() else ()
snake_case__ : List[Any] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Dict = False
snake_case__ : Optional[Any] = False
def UpperCAmelCase_ ( self : int ) -> Tuple:
__SCREAMING_SNAKE_CASE = LlamaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = "single_label_classification"
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = "multi_label_classification"
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE = LlamaModel(UpperCAmelCase__ )
original_model.to(UpperCAmelCase__ )
original_model.eval()
__SCREAMING_SNAKE_CASE = original_model(UpperCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE = original_model(UpperCAmelCase__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
__SCREAMING_SNAKE_CASE = {"type": scaling_type, "factor": 10.0}
__SCREAMING_SNAKE_CASE = LlamaModel(UpperCAmelCase__ )
scaled_model.to(UpperCAmelCase__ )
scaled_model.eval()
__SCREAMING_SNAKE_CASE = scaled_model(UpperCAmelCase__ ).last_hidden_state
__SCREAMING_SNAKE_CASE = scaled_model(UpperCAmelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__SCREAMING_SNAKE_CASE = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
__SCREAMING_SNAKE_CASE = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def UpperCAmelCase_ ( self : Dict ) -> int:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor(UpperCAmelCase__ ) )
# Expected mean on dim = -1
__SCREAMING_SNAKE_CASE = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def UpperCAmelCase_ ( self : Dict ) -> int:
__SCREAMING_SNAKE_CASE = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__SCREAMING_SNAKE_CASE = model(torch.tensor(UpperCAmelCase__ ) )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase__ , atol=1E-2 , rtol=1E-2 )
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__SCREAMING_SNAKE_CASE = "Simply put, the theory of relativity states that "
__SCREAMING_SNAKE_CASE = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__SCREAMING_SNAKE_CASE = tokenizer.encode(UpperCAmelCase__ , return_tensors="pt" )
__SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=UpperCAmelCase__ )
# greedy generation outputs
__SCREAMING_SNAKE_CASE = model.generate(UpperCAmelCase__ , max_new_tokens=6_4 , top_p=UpperCAmelCase__ , temperature=1 , do_sample=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 54
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
a__ : Union[str, Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__SCREAMING_SNAKE_CASE = json.loads(lowerCAmelCase_ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__SCREAMING_SNAKE_CASE = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__SCREAMING_SNAKE_CASE = json.loads(lowerCAmelCase_ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCAmelCase_ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : str = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , UpperCAmelCase__ , )
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
__SCREAMING_SNAKE_CASE = torch.device("cpu" )
__SCREAMING_SNAKE_CASE = 0
elif is_sagemaker_model_parallel_available():
__SCREAMING_SNAKE_CASE = smp.local_rank()
__SCREAMING_SNAKE_CASE = torch.device("cuda" , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
__SCREAMING_SNAKE_CASE = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
__SCREAMING_SNAKE_CASE = torch.device("cuda" , self.local_rank )
__SCREAMING_SNAKE_CASE = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__SCREAMING_SNAKE_CASE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
__SCREAMING_SNAKE_CASE = torch.device("cuda" , self.local_rank )
__SCREAMING_SNAKE_CASE = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCAmelCase__ )
return device
@property
def UpperCAmelCase_ ( self : Dict ) -> Any:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
return not is_sagemaker_model_parallel_available()
@property
def UpperCAmelCase_ ( self : Tuple ) -> int:
return False
| 54
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_SCREAMING_SNAKE_CASE = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
| 3
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["GLPNFeatureExtractor"]
__a = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : Dict = ["""image_processor""", """tokenizer"""]
lowercase_ : Union[str, Any] = """ViltImageProcessor"""
lowercase_ : Any = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case_ , )
A_ : Dict = kwargs.pop('feature_extractor' )
A_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case_ , snake_case_ )
A_ : List[str] = self.image_processor
def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
"""simple docstring"""
A_ : str = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.tokenizer.model_input_names
A_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , )
return self.image_processor_class
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , )
return self.image_processor
| 286
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3FeatureExtractor"""]
_A = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3FeatureExtractor"""]
_A = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166
| 1
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __lt__( self, lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self, lowercase_ ) -> Dict:
"""simple docstring"""
return self[-1] == other[-1]
def UpperCAmelCase__ ( _A : list ):
'''simple docstring'''
a__ =[]
# sort into stacks
for element in collection:
a__ =Stack([element] )
a__ =bisect_left(_A , _A )
if i != len(_A ):
stacks[i].append(_A )
else:
stacks.append(_A )
# use a heap-based merge to merge stack efficiently
a__ =merge(*(reversed(_A ) for stack in stacks) )
return collection
if __name__ == "__main__":
lowerCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 188
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __magic_name__ :
'''simple docstring'''
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=True, lowercase_=99, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=37, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=16, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_=None, ) -> List[Any]:
"""simple docstring"""
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_mask
a__ =use_token_type_ids
a__ =use_labels
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =scope
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
a__ =None
if self.use_input_mask:
a__ =random_attention_mask([self.batch_size, self.seq_length] )
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size], self.type_sequence_label_size )
a__ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
a__ =ids_tensor([self.batch_size], self.num_choices )
a__ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowercase_, initializer_range=self.initializer_range, use_stable_embedding=lowercase_, )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> List[str]:
"""simple docstring"""
a__ =OpenLlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_ )
a__ =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Any:
"""simple docstring"""
a__ =True
a__ =OpenLlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(
lowercase_, attention_mask=lowercase_, encoder_hidden_states=lowercase_, encoder_attention_mask=lowercase_, )
a__ =model(
lowercase_, attention_mask=lowercase_, encoder_hidden_states=lowercase_, )
a__ =model(lowercase_, attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> List[str]:
"""simple docstring"""
a__ =OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> List[Any]:
"""simple docstring"""
a__ =True
a__ =True
a__ =OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
a__ =model(
lowercase_, attention_mask=lowercase_, encoder_hidden_states=lowercase_, encoder_attention_mask=lowercase_, use_cache=lowercase_, )
a__ =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ =ids_tensor((self.batch_size, 3), config.vocab_size )
a__ =ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
a__ =torch.cat([input_ids, next_tokens], dim=-1 )
a__ =torch.cat([input_mask, next_mask], dim=-1 )
a__ =model(
lowercase_, attention_mask=lowercase_, encoder_hidden_states=lowercase_, encoder_attention_mask=lowercase_, output_hidden_states=lowercase_, )['''hidden_states'''][0]
a__ =model(
lowercase_, attention_mask=lowercase_, encoder_hidden_states=lowercase_, encoder_attention_mask=lowercase_, past_key_values=lowercase_, output_hidden_states=lowercase_, )['''hidden_states'''][0]
# select random slice
a__ =ids_tensor((1,), output_from_past.shape[-1] ).item()
a__ =output_from_no_past[:, -3:, random_slice_idx].detach()
a__ =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_, lowercase_, atol=1E-3 ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =self.prepare_config_and_inputs()
(
(
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
), (
a__
),
) =config_and_inputs
a__ ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowerCamelCase__ : Optional[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : List[str] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : int = False
lowerCamelCase__ : Any = False
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =OpenLlamaModelTester(self )
a__ =ConfigTester(self, config_class=lowercase_, hidden_size=37 )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ =type
self.model_tester.create_and_check_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =3
a__ =input_dict['''input_ids''']
a__ =input_ids.ne(1 ).to(lowercase_ )
a__ =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
a__ =OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =3
a__ ='''single_label_classification'''
a__ =input_dict['''input_ids''']
a__ =input_ids.ne(1 ).to(lowercase_ )
a__ =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
a__ =OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =3
a__ ='''multi_label_classification'''
a__ =input_dict['''input_ids''']
a__ =input_ids.ne(1 ).to(lowercase_ )
a__ =ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
a__ =OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _UpperCAmelCase ( self, lowercase_ ) -> Optional[Any]:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =ids_tensor([1, 10], config.vocab_size )
a__ =ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a__ =OpenLlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
a__ =original_model(lowercase_ ).last_hidden_state
a__ =original_model(lowercase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
a__ ={'''type''': scaling_type, '''factor''': 10.0}
a__ =OpenLlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
a__ =scaled_model(lowercase_ ).last_hidden_state
a__ =scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_, lowercase_, atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_, lowercase_, atol=1E-5 ) )
| 188
| 1
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class snake_case_:
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ):
pass
def _snake_case ( _snake_case : Image ):
lowerCAmelCase : Any = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case_( unittest.TestCase ):
__UpperCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Any ):
lowerCAmelCase : Optional[Any] = DepthEstimationPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Any = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , lowerCAmelCase_ )
import datasets
lowerCAmelCase : Any = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
lowerCAmelCase : Optional[Any] = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , lowerCAmelCase_ , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowerCamelCase__ ( self : Any ):
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Union[str, Any] = '''Intel/dpt-large'''
lowerCAmelCase : Dict = pipeline('''depth-estimation''' , model=lowerCAmelCase_ )
lowerCAmelCase : Optional[Any] = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowerCAmelCase : Union[str, Any] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowerCamelCase__ ( self : int ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 365
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
def __init__( self : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase : str = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ )
@torch.no_grad()
def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , UpperCamelCase_ ):
lowerCAmelCase : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase : int = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase : Dict = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase : Any = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 314
| 0
|
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowerCAmelCase : Any = get_logger(__name__)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Optional[str] = None ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = (
os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
__magic_name__ = Extractor
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
__magic_name__ = os.path.abspath(UpperCamelCase__ )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) )
def _lowercase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ ))
)
def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> str:
"""simple docstring"""
__magic_name__ = self.extractor.infer_extractor_format(UpperCamelCase__ )
if not extractor_format:
return input_path
__magic_name__ = self._get_output_path(UpperCamelCase__ )
if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ):
self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return output_path
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
@classmethod
@abstractmethod
def _lowercase ( cls : List[str] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : Union[str, Any] ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
...
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
a__ = []
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> List[str]:
"""simple docstring"""
with open(UpperCamelCase__ , """rb""" ) as f:
return f.read(UpperCamelCase__ )
@classmethod
def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
__magic_name__ = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers )
try:
__magic_name__ = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
@classmethod
def _lowercase ( cls : Optional[Any] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(UpperCamelCase__ )
@staticmethod
def _lowercase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
def resolved(UpperCamelCase__ : str ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase__ ) )
def badpath(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ )
def badlink(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
__magic_name__ = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase__ )
__magic_name__ = resolved(UpperCamelCase__ )
for finfo in members:
if badpath(finfo.name , UpperCamelCase__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
__magic_name__ = tarfile.open(UpperCamelCase__ )
tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) )
tar_file.close()
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x1F\x8B"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(UpperCamelCase__ , """rb""" ) as gzip_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase__ , """rb""" ) as fp:
__magic_name__ = _EndRecData(UpperCamelCase__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__magic_name__ = fp.read(UpperCamelCase__ ) # CD is where we expect it to be
if len(UpperCamelCase__ ) == sizeCentralDir:
__magic_name__ = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file:
zip_file.extractall(UpperCamelCase__ )
zip_file.close()
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(UpperCamelCase__ ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
__magic_name__ = rarfile.RarFile(UpperCamelCase__ )
rf.extractall(UpperCamelCase__ )
rf.close()
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__magic_name__ = zstd.ZstdDecompressor()
with open(UpperCamelCase__ , """rb""" ) as ifh, open(UpperCamelCase__ , """wb""" ) as ofh:
dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x42\x5A\x68"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
with bza.open(UpperCamelCase__ , """rb""" ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with pyazr.SevenZipFile(UpperCamelCase__ , """r""" ) as archive:
archive.extractall(UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(UpperCamelCase__ , """rb""" ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ :
'''simple docstring'''
a__ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _lowercase ( cls : Tuple ) -> Tuple:
"""simple docstring"""
return max(
len(UpperCamelCase__ )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase__ , UpperCamelCase__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ )
except OSError:
return b""
@classmethod
def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , )
__magic_name__ = cls.infer_extractor_format(UpperCamelCase__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _lowercase ( cls : Dict , UpperCamelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
__magic_name__ = cls._get_magic_number_max_length()
__magic_name__ = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
return extractor_format
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ )
# Prevent parallel extractions
__magic_name__ = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) )
with FileLock(UpperCamelCase__ ):
shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=UpperCamelCase__ , )
__magic_name__ = extractor if extractor != """deprecated""" else extractor_format
else:
__magic_name__ = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=UpperCamelCase__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase__ ):
return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
| 88
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : List[str] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 286
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=1_00 , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=None , __UpperCamelCase=[0, 1, 2, 3] , ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Dict = parent
__UpperCamelCase : Any = 1_00
__UpperCamelCase : Union[str, Any] = batch_size
__UpperCamelCase : List[Any] = image_size
__UpperCamelCase : int = patch_size
__UpperCamelCase : Dict = num_channels
__UpperCamelCase : Optional[Any] = is_training
__UpperCamelCase : Dict = use_labels
__UpperCamelCase : List[str] = hidden_size
__UpperCamelCase : List[str] = num_hidden_layers
__UpperCamelCase : List[Any] = num_attention_heads
__UpperCamelCase : Optional[Any] = intermediate_size
__UpperCamelCase : Union[str, Any] = hidden_act
__UpperCamelCase : int = hidden_dropout_prob
__UpperCamelCase : Dict = attention_probs_dropout_prob
__UpperCamelCase : Optional[int] = type_sequence_label_size
__UpperCamelCase : List[Any] = initializer_range
__UpperCamelCase : int = scope
__UpperCamelCase : str = out_indices
__UpperCamelCase : Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
__UpperCamelCase : Optional[int] = num_patches + 1
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase : str = None
__UpperCamelCase : Dict = None
if self.use_labels:
__UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__UpperCamelCase : str = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
'''simple docstring'''
__UpperCamelCase : str = BeitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Optional[Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
'''simple docstring'''
__UpperCamelCase : List[Any] = BeitForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : int = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Dict = self.type_sequence_label_size
__UpperCamelCase : Optional[int] = BeitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : Union[str, Any] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase : Any = 1
__UpperCamelCase : Optional[Any] = BeitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase : List[str] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : str = self.num_labels
__UpperCamelCase : str = BeitForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCamelCase : str = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__UpperCamelCase : Tuple = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] = config_and_inputs
__UpperCamelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : int = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase : int = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase : Dict = False
lowercase : Union[str, Any] = False
lowercase : int = False
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Dict = BeitModelTester(self )
__UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : Tuple = model_class(__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : Tuple = [*signature.parameters.keys()]
__UpperCamelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__UpperCamelCase , __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__UpperCamelCase ), BeitForMaskedImageModeling]:
continue
__UpperCamelCase : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
__UpperCamelCase : str = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__UpperCamelCase : Dict = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__UpperCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__UpperCamelCase : str = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
__UpperCamelCase : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = model(**__UpperCamelCase ).loss
loss.backward()
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : Tuple = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
__UpperCamelCase : str = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] = BeitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCAmelCase_ ():
__UpperCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(__UpperCamelCase )
__UpperCamelCase : Dict = self.default_image_processor
__UpperCamelCase : Tuple = prepare_img()
__UpperCamelCase : Dict = image_processor(images=__UpperCamelCase , return_tensors="pt" ).pixel_values.to(__UpperCamelCase )
# prepare bool_masked_pos
__UpperCamelCase : List[str] = torch.ones((1, 1_96) , dtype=torch.bool ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase : Dict = model(pixel_values=__UpperCamelCase , bool_masked_pos=__UpperCamelCase )
__UpperCamelCase : int = outputs.logits
# verify the logits
__UpperCamelCase : Union[str, Any] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , __UpperCamelCase )
__UpperCamelCase : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCamelCase , atol=1E-2 ) )
@slow
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(__UpperCamelCase )
__UpperCamelCase : List[Any] = self.default_image_processor
__UpperCamelCase : int = prepare_img()
__UpperCamelCase : Optional[int] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase : List[Any] = model(**__UpperCamelCase )
__UpperCamelCase : List[Any] = outputs.logits
# verify the logits
__UpperCamelCase : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , __UpperCamelCase )
__UpperCamelCase : Any = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
__UpperCamelCase : Any = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __UpperCamelCase )
@slow
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
__UpperCamelCase )
__UpperCamelCase : Optional[int] = self.default_image_processor
__UpperCamelCase : str = prepare_img()
__UpperCamelCase : List[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase : str = model(**__UpperCamelCase )
__UpperCamelCase : Tuple = outputs.logits
# verify the logits
__UpperCamelCase : Union[str, Any] = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , __UpperCamelCase )
__UpperCamelCase : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
__UpperCamelCase : int = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __UpperCamelCase )
@slow
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
__UpperCamelCase : List[Any] = model.to(__UpperCamelCase )
__UpperCamelCase : Dict = BeitImageProcessor(do_resize=__UpperCamelCase , size=6_40 , do_center_crop=__UpperCamelCase )
__UpperCamelCase : str = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__UpperCamelCase : Optional[int] = Image.open(ds[0]["file"] )
__UpperCamelCase : List[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase : str = model(**__UpperCamelCase )
__UpperCamelCase : List[Any] = outputs.logits
# verify the logits
__UpperCamelCase : Tuple = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , __UpperCamelCase )
__UpperCamelCase : Dict = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
__UpperCamelCase : Tuple = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=__UpperCamelCase , )
else:
__UpperCamelCase : Any = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=__UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
__UpperCamelCase : List[Any] = model.to(__UpperCamelCase )
__UpperCamelCase : Optional[Any] = BeitImageProcessor(do_resize=__UpperCamelCase , size=6_40 , do_center_crop=__UpperCamelCase )
__UpperCamelCase : Dict = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__UpperCamelCase : List[str] = Image.open(ds[0]["file"] )
__UpperCamelCase : List[Any] = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCamelCase : Any = model(**__UpperCamelCase )
__UpperCamelCase : Optional[Any] = outputs.logits.detach().cpu()
__UpperCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase , target_sizes=[(5_00, 3_00)] )
__UpperCamelCase : str = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
__UpperCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase )
__UpperCamelCase : Optional[int] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , __UpperCamelCase )
| 171
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : Optional[Any] = 'canine'
def __init__( self , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=30_72 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=1_63_84 , __UpperCamelCase=16 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-12 , __UpperCamelCase=0 , __UpperCamelCase=0Xe000 , __UpperCamelCase=0Xe001 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=8 , __UpperCamelCase=1_63_84 , __UpperCamelCase=1_28 , **__UpperCamelCase , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
__UpperCamelCase : List[str] = max_position_embeddings
__UpperCamelCase : int = hidden_size
__UpperCamelCase : Tuple = num_hidden_layers
__UpperCamelCase : str = num_attention_heads
__UpperCamelCase : Optional[int] = intermediate_size
__UpperCamelCase : int = hidden_act
__UpperCamelCase : Dict = hidden_dropout_prob
__UpperCamelCase : List[str] = attention_probs_dropout_prob
__UpperCamelCase : Optional[Any] = initializer_range
__UpperCamelCase : List[str] = type_vocab_size
__UpperCamelCase : str = layer_norm_eps
# Character config:
__UpperCamelCase : str = downsampling_rate
__UpperCamelCase : Tuple = upsampling_kernel_size
__UpperCamelCase : Union[str, Any] = num_hash_functions
__UpperCamelCase : List[str] = num_hash_buckets
__UpperCamelCase : List[Any] = local_transformer_stride
| 171
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A ={
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A =['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A =[
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A =[
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A =logging.getLogger(__name__)
def snake_case_ (_a : Dict , _a : Union[str, Any] ):
return (preds == labels).mean()
@dataclass
class _a :
__a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _a :
__a : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
__a : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
__a : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__a : bool = field(
default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def snake_case_ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _a )
# Set seed
set_seed(training_args.seed )
try:
UpperCAmelCase = processors[data_args.task_name]()
UpperCAmelCase = processor.get_labels()
UpperCAmelCase = len(_a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_a , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_a , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_a : EvalPrediction ) -> Dict:
UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_a , p.label_ids )}
# Data collator
UpperCAmelCase = DataCollatorWithPadding(_a , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCAmelCase = Trainer(
model=_a , args=_a , train_dataset=_a , eval_dataset=_a , compute_metrics=_a , data_collator=_a , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase = trainer.evaluate()
UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(_a , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , _a , _a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(_a )
return results
def snake_case_ (_a : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 34
| 1
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : int = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
_lowercase : str = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
_lowercase : List[str] = {
"abeja/gpt-neox-japanese-2.7b": 2_0_4_8,
}
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : List[str] ):
"""simple docstring"""
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Dict =json.loads(f.read() )
lowerCamelCase__ : str =collections.OrderedDict()
lowerCamelCase__ : List[str] =collections.OrderedDict()
lowerCamelCase__ : str =collections.OrderedDict()
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
lowerCamelCase__ : Union[str, Any] =f.readlines()
lowerCamelCase__ : Dict =[[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(__lowerCamelCase ):
lowerCamelCase__ : Dict =b
lowerCamelCase__ : Tuple =idx
for wd in b:
lowerCamelCase__ : Union[str, Any] =idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : int, lowerCamelCase : str, lowerCamelCase : Union[str, Any], lowerCamelCase : str="<|endoftext|>", lowerCamelCase : Union[str, Any]="<|endoftext|>", lowerCamelCase : Dict="<|startoftext|>", lowerCamelCase : Dict="<|endoftext|>", lowerCamelCase : Any=False, **lowerCamelCase : Tuple, )-> Optional[Any]:
super().__init__(
unk_token=lowerCamelCase, pad_token=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, do_clean_text=lowerCamelCase, **lowerCamelCase, )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(
F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(
F'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
lowerCamelCase__ : Tuple =do_clean_text
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =load_vocab_and_emoji(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =SubWordJapaneseTokenizer(
vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji )
@property
def snake_case ( self : Dict )-> Optional[Any]:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def snake_case ( self : Optional[int] )-> List[str]:
return dict(self.raw_vocab, **self.added_tokens_encoder )
def snake_case ( self : str, lowerCamelCase : Dict )-> Optional[Any]:
return self.subword_tokenizer.tokenize(lowerCamelCase, clean=self.do_clean_text )
def snake_case ( self : Optional[int], lowerCamelCase : Union[str, Any] )-> Tuple:
return self.vocab.get(lowerCamelCase, self.vocab.get(self.unk_token ) )
def snake_case ( self : Optional[int], lowerCamelCase : int )-> Optional[int]:
return self.subword_tokenizer.convert_id_to_token(lowerCamelCase )
def snake_case ( self : str, lowerCamelCase : Any )-> Optional[Any]:
lowerCamelCase__ : Tuple =''''''.join(lowerCamelCase ).strip()
return out_string
def snake_case ( self : str, lowerCamelCase : "Conversation" )-> List[int]:
lowerCamelCase__ : int =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase, add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
if len(lowerCamelCase ) > self.model_max_length:
lowerCamelCase__ : List[Any] =input_ids[-self.model_max_length :]
return input_ids
def snake_case ( self : int, lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
lowerCamelCase__ : str =0
if os.path.isdir(lowerCamelCase ):
lowerCamelCase__ : Optional[int] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : Dict =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
lowerCamelCase__ : List[str] =(
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
lowerCamelCase__ : Dict =(
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
''' Please check that the vocabulary is not corrupted!''' )
lowerCamelCase__ : Any =token_index
writer.write(''','''.join(lowerCamelCase ) + '''\n''' )
index += 1
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
json.dump(self.emoji, lowerCamelCase )
return vocab_file, emoji_file
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[Any], lowerCamelCase : Dict, lowerCamelCase : Any, lowerCamelCase : Optional[int] )-> List[str]:
lowerCamelCase__ : Union[str, Any] =vocab # same as swe
lowerCamelCase__ : Optional[Any] =ids_to_tokens # same as bpe
lowerCamelCase__ : Optional[int] =emoji
lowerCamelCase__ : Tuple =np.max([len(lowerCamelCase ) for w in self.vocab.keys()] )
lowerCamelCase__ : List[Any] =re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
lowerCamelCase__ : Tuple =re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
lowerCamelCase__ : int =re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
lowerCamelCase__ : List[str] =re.compile(
r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
lowerCamelCase__ : int =re.compile(
r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
lowerCamelCase__ : Optional[int] =re.compile(
r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
lowerCamelCase__ : Optional[Any] ='''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
lowerCamelCase__ : List[str] ='''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
lowerCamelCase__ : str =str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self : List[str] )-> Any:
return len(self.ids_to_tokens )
def snake_case ( self : Tuple, lowerCamelCase : Optional[Any] )-> List[str]:
lowerCamelCase__ : str =self.content_repattera.sub('''<URL>''', lowerCamelCase )
lowerCamelCase__ : int =self.content_repattera.sub('''<EMAIL>''', lowerCamelCase )
lowerCamelCase__ : Optional[int] =self.content_repattera.sub('''<TEL>''', lowerCamelCase )
lowerCamelCase__ : Dict =self.content_repattera.sub('''<DATE>''', lowerCamelCase )
lowerCamelCase__ : Any =self.content_repattera.sub('''<DATE>''', lowerCamelCase )
lowerCamelCase__ : int =self.content_repattera.sub('''<PRICE>''', lowerCamelCase )
lowerCamelCase__ : Any =content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCamelCase__ : List[str] =content.replace('''<BLOCK><BLOCK>''', '''<BLOCK>''' )
return content
def snake_case ( self : str, lowerCamelCase : Any, lowerCamelCase : List[Any]=False )-> List[str]:
lowerCamelCase__ : List[str] =text.replace(''' ''', '''<SP>''' )
lowerCamelCase__ : Union[str, Any] =text.replace(''' ''', '''<SP>''' )
lowerCamelCase__ : Optional[Any] =text.replace('''\r\n''', '''<BR>''' )
lowerCamelCase__ : Optional[Any] =text.replace('''\n''', '''<BR>''' )
lowerCamelCase__ : Optional[int] =text.replace('''\r''', '''<BR>''' )
lowerCamelCase__ : str =text.replace('''\t''', '''<TAB>''' )
lowerCamelCase__ : List[str] =text.replace('''—''', '''ー''' )
lowerCamelCase__ : Optional[int] =text.replace('''−''', '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCamelCase__ : int =text.replace(lowerCamelCase, lowerCamelCase )
if clean:
lowerCamelCase__ : Optional[Any] =self.clean_text(lowerCamelCase )
def check_simbol(lowerCamelCase : str ):
lowerCamelCase__ : Union[str, Any] =x.encode()
if len(lowerCamelCase ) == 1 and len(lowerCamelCase ) == 2:
lowerCamelCase__ : List[Any] =(int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2_a1 and c <= 0xc2_bf)
or (c >= 0xc7_80 and c <= 0xc7_83)
or (c >= 0xca_b9 and c <= 0xcb_bf)
or (c >= 0xcc_80 and c <= 0xcd_a2)
):
return True
return False
def checkuae(lowerCamelCase : List[Any] ):
lowerCamelCase__ : int =x.encode()
if len(lowerCamelCase ) == 1 and len(lowerCamelCase ) == 3:
lowerCamelCase__ : Tuple =(int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_80_80 and c <= 0xe2_b0_7f:
return True
return False
lowerCamelCase__ : Union[str, Any] =0
lowerCamelCase__ : Dict =[]
while pos < len(lowerCamelCase ):
lowerCamelCase__ : List[str] =min(len(lowerCamelCase ), pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
lowerCamelCase__ : List[Any] =[] # (token_id, token, pos)
for e in range(lowerCamelCase, lowerCamelCase, -1 ):
lowerCamelCase__ : Any =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCamelCase ) > 2:
lowerCamelCase__ : Optional[int] =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCamelCase ) > 0:
# the smallest token_id is adopted
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =sorted(lowerCamelCase, key=lambda lowerCamelCase : x[0] )[0]
result.append(lowerCamelCase )
lowerCamelCase__ : Optional[int] =e
else:
lowerCamelCase__ : List[Any] =pos + 1
lowerCamelCase__ : Tuple =text[pos:end]
if check_simbol(lowerCamelCase ):
result.append('''<KIGOU>''' )
elif checkuae(lowerCamelCase ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
lowerCamelCase__ : List[Any] =end
return result
def snake_case ( self : Tuple, lowerCamelCase : Tuple, lowerCamelCase : Optional[int]="\n" )-> Optional[int]:
lowerCamelCase__ : List[str] =[]
lowerCamelCase__ : List[str] =[]
lowerCamelCase__ : Optional[Any] =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCamelCase ) > 0:
words.append(bytearray(lowerCamelCase ).decode('''utf-8''', errors='''replace''' ) )
lowerCamelCase__ : List[Any] =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(lowerCamelCase )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
words.append(bytearray(lowerCamelCase ).decode('''utf-8''', errors='''replace''' ) )
lowerCamelCase__ : Tuple =''''''.join(lowerCamelCase )
return text
| 272
|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
if "model" in orig_key:
lowerCamelCase__ : Optional[int] =orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowerCamelCase__ : Union[str, Any] =orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowerCamelCase__ : List[Any] =orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowerCamelCase__ : List[str] =orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowerCamelCase__ : str =orig_key.split('''.''' )[0].split('''_''' )[-1]
lowerCamelCase__ : Dict =orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
lowerCamelCase__ : Union[str, Any] =orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowerCamelCase__ : str =orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowerCamelCase__ : Union[str, Any] =orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowerCamelCase__ : Optional[int] =orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowerCamelCase__ : List[str] =orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowerCamelCase__ : Dict =orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowerCamelCase__ : Union[str, Any] =orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowerCamelCase__ : str =orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowerCamelCase__ : Tuple =orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowerCamelCase__ : Optional[int] =orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowerCamelCase__ : Optional[int] ='''yoso.''' + orig_key
return orig_key
def snake_case__ ( __lowerCamelCase : List[str] , __lowerCamelCase : Any ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCamelCase__ : Optional[Any] =orig_state_dict.pop(__lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase__ : List[str] =val
lowerCamelCase__ : Optional[int] =orig_state_dict['''cls.predictions.decoder.bias''']
lowerCamelCase__ : str =torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
lowerCamelCase__ : List[Any] =YosoConfig.from_json_file(__lowerCamelCase )
lowerCamelCase__ : List[str] =YosoForMaskedLM(__lowerCamelCase )
lowerCamelCase__ : Tuple =convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
print(model.load_state_dict(__lowerCamelCase ) )
model.eval()
model.save_pretrained(__lowerCamelCase )
print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : Optional[Any] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 272
| 1
|
import argparse
import struct
import unittest
class _lowerCamelCase :
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE )->None:
'''simple docstring'''
A_ : Optional[int] = data
# Initialize hash values
A_ : Union[str, Any] = [
0x6_a_0_9_e_6_6_7,
0xb_b_6_7_a_e_8_5,
0x3_c_6_e_f_3_7_2,
0xa_5_4_f_f_5_3_a,
0x5_1_0_e_5_2_7_f,
0x9_b_0_5_6_8_8_c,
0x1_f_8_3_d_9_a_b,
0x5_b_e_0_c_d_1_9,
]
# Initialize round constants
A_ : Optional[Any] = [
0x4_2_8_a_2_f_9_8,
0x7_1_3_7_4_4_9_1,
0xb_5_c_0_f_b_c_f,
0xe_9_b_5_d_b_a_5,
0x3_9_5_6_c_2_5_b,
0x5_9_f_1_1_1_f_1,
0x9_2_3_f_8_2_a_4,
0xa_b_1_c_5_e_d_5,
0xd_8_0_7_a_a_9_8,
0x1_2_8_3_5_b_0_1,
0x2_4_3_1_8_5_b_e,
0x5_5_0_c_7_d_c_3,
0x7_2_b_e_5_d_7_4,
0x8_0_d_e_b_1_f_e,
0x9_b_d_c_0_6_a_7,
0xc_1_9_b_f_1_7_4,
0xe_4_9_b_6_9_c_1,
0xe_f_b_e_4_7_8_6,
0x0_f_c_1_9_d_c_6,
0x2_4_0_c_a_1_c_c,
0x2_d_e_9_2_c_6_f,
0x4_a_7_4_8_4_a_a,
0x5_c_b_0_a_9_d_c,
0x7_6_f_9_8_8_d_a,
0x9_8_3_e_5_1_5_2,
0xa_8_3_1_c_6_6_d,
0xb_0_0_3_2_7_c_8,
0xb_f_5_9_7_f_c_7,
0xc_6_e_0_0_b_f_3,
0xd_5_a_7_9_1_4_7,
0x0_6_c_a_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_b_7_0_a_8_5,
0x2_e_1_b_2_1_3_8,
0x4_d_2_c_6_d_f_c,
0x5_3_3_8_0_d_1_3,
0x6_5_0_a_7_3_5_4,
0x7_6_6_a_0_a_b_b,
0x8_1_c_2_c_9_2_e,
0x9_2_7_2_2_c_8_5,
0xa_2_b_f_e_8_a_1,
0xa_8_1_a_6_6_4_b,
0xc_2_4_b_8_b_7_0,
0xc_7_6_c_5_1_a_3,
0xd_1_9_2_e_8_1_9,
0xd_6_9_9_0_6_2_4,
0xf_4_0_e_3_5_8_5,
0x1_0_6_a_a_0_7_0,
0x1_9_a_4_c_1_1_6,
0x1_e_3_7_6_c_0_8,
0x2_7_4_8_7_7_4_c,
0x3_4_b_0_b_c_b_5,
0x3_9_1_c_0_c_b_3,
0x4_e_d_8_a_a_4_a,
0x5_b_9_c_c_a_4_f,
0x6_8_2_e_6_f_f_3,
0x7_4_8_f_8_2_e_e,
0x7_8_a_5_6_3_6_f,
0x8_4_c_8_7_8_1_4,
0x8_c_c_7_0_2_0_8,
0x9_0_b_e_f_f_f_a,
0xa_4_5_0_6_c_e_b,
0xb_e_f_9_a_3_f_7,
0xc_6_7_1_7_8_f_2,
]
A_ : str = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _snake_case ( _SCREAMING_SNAKE_CASE )->bytes:
'''simple docstring'''
A_ : List[Any] = B'''\x80''' + (B'''\x00''' * (63 - (len(_SCREAMING_SNAKE_CASE ) + 8) % 64))
A_ : List[str] = struct.pack('''>Q''' , (len(_SCREAMING_SNAKE_CASE ) * 8) )
return data + padding + big_endian_integer
def _snake_case ( self )->None:
'''simple docstring'''
A_ : List[str] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A_ : int = list(struct.unpack('''>16L''' , _SCREAMING_SNAKE_CASE ) )
# add 48 0-ed integers
words += [0] * 48
A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ : Dict = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A_ : str = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A_ : Dict = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A_ : Any = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
A_ : str = self.ror(_SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 25 )
A_ : int = (e & f) ^ ((~e & 0xf_f_f_f_f_f_f_f) & g)
A_ : Union[str, Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
A_ : Union[str, Any] = self.ror(_SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 22 )
A_ : int = (a & b) ^ (a & c) ^ (b & c)
A_ : Optional[Any] = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ : List[Any] = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
A_ : str = [a, b, c, d, e, f, g, h]
# Modify final values
A_ : Any = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
A_ : Optional[Any] = ''''''.join([hex(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for value in self.hashes] )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->int:
'''simple docstring'''
return 0xf_f_f_f_f_f_f_f & (value << (32 - rotations)) | (value >> rotations)
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->None:
'''simple docstring'''
import hashlib
A_ : List[str] = bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(_SCREAMING_SNAKE_CASE ).hash , hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() )
def _SCREAMING_SNAKE_CASE ( ):
import doctest
doctest.testmod()
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument(
'''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
A_ : List[Any] = parser.parse_args()
A_ : List[Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
A_ : Tuple = f.read()
else:
A_ : List[str] = bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
print(SHAaaa(SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
| 186
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case = DDIMPipeline
snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case = False
def _snake_case ( self )->List[str]:
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
A_ : Optional[Any] = DDIMScheduler()
A_ : str = {'''unet''': unet, '''scheduler''': scheduler}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )->Optional[Any]:
'''simple docstring'''
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A_ : Any = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A_ : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : Any = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = '''cpu'''
A_ : Dict = self.get_dummy_components()
A_ : str = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : Any = pipe(**_SCREAMING_SNAKE_CASE ).images
A_ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
A_ : List[Any] = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
A_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _snake_case ( self )->Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : int = '''google/ddpm-cifar10-32'''
A_ : Tuple = UNetaDModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = DDIMScheduler()
A_ : str = DDIMPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ddim.to(_SCREAMING_SNAKE_CASE )
ddim.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Any = ddim(generator=_SCREAMING_SNAKE_CASE , eta=0.0 , output_type='''numpy''' ).images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A_ : Any = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self )->List[str]:
'''simple docstring'''
A_ : Tuple = '''google/ddpm-ema-bedroom-256'''
A_ : int = UNetaDModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Any = DDIMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = DDIMPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
ddpm.to(_SCREAMING_SNAKE_CASE )
ddpm.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Dict = torch.manual_seed(0 )
A_ : List[str] = ddpm(generator=_SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
A_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A_ : Tuple = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 186
| 1
|
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
def __call__(self ) -> Dict:
_snake_case = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
_snake_case = 1
_snake_case = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
_snake_case = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
_snake_case = scheduler_output - scheduler_output + torch.ones_like(UpperCAmelCase )
return result
| 270
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 270
| 1
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 127
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
_SCREAMING_SNAKE_CASE : List[str] = "hf-internal-testing/tiny-random-bert"
_SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
_SCREAMING_SNAKE_CASE : Optional[int] = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case = cached_file(__snake_case , __snake_case )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__snake_case ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__snake_case , __snake_case ) ) )
with open(os.path.join(__snake_case , '''refs''' , '''main''' ) ) as f:
snake_case = f.read()
self.assertEqual(__snake_case , os.path.join(__snake_case , '''snapshots''' , __snake_case , __snake_case ) )
self.assertTrue(os.path.isfile(__snake_case ) )
# File is cached at the same place the second time.
snake_case = cached_file(__snake_case , __snake_case )
self.assertEqual(__snake_case , __snake_case )
# Using a specific revision to test the full commit hash.
snake_case = cached_file(__snake_case , __snake_case , revision='''9b8c223''' )
self.assertEqual(__snake_case , os.path.join(__snake_case , '''snapshots''' , __snake_case , __snake_case ) )
def a_ ( self ):
with self.assertRaisesRegex(__snake_case , '''is not a valid model identifier''' ):
snake_case = cached_file('''tiny-random-bert''' , __snake_case )
with self.assertRaisesRegex(__snake_case , '''is not a valid git identifier''' ):
snake_case = cached_file(__snake_case , __snake_case , revision='''aaaa''' )
with self.assertRaisesRegex(__snake_case , '''does not appear to have a file named''' ):
snake_case = cached_file(__snake_case , '''conf''' )
def a_ ( self ):
with self.assertRaisesRegex(__snake_case , '''does not appear to have a file named''' ):
snake_case = cached_file(__snake_case , '''conf''' )
with open(os.path.join(__snake_case , '''refs''' , '''main''' ) ) as f:
snake_case = f.read()
self.assertTrue(os.path.isfile(os.path.join(__snake_case , '''.no_exist''' , __snake_case , '''conf''' ) ) )
snake_case = cached_file(__snake_case , '''conf''' , _raise_exceptions_for_missing_entries=__snake_case )
self.assertIsNone(__snake_case )
snake_case = cached_file(__snake_case , '''conf''' , local_files_only=__snake_case , _raise_exceptions_for_missing_entries=__snake_case )
self.assertIsNone(__snake_case )
snake_case = mock.Mock()
snake_case = 5_0_0
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=__snake_case ) as mock_head:
snake_case = cached_file(__snake_case , '''conf''' , _raise_exceptions_for_connection_errors=__snake_case )
self.assertIsNone(__snake_case )
# This check we did call the fake head request
mock_head.assert_called()
def a_ ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , __snake_case ) )
def a_ ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__snake_case , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , __snake_case )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__snake_case , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , __snake_case , revision='''ahaha''' )
snake_case = get_file_from_repo('''bert-base-cased''' , __snake_case )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case = json.loads(open(__snake_case , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
def a_ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = Path(__snake_case ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(__snake_case , '''a.txt''' ) , str(__snake_case ) )
self.assertIsNone(get_file_from_repo(__snake_case , '''b.txt''' ) )
| 127
| 1
|
__A ='''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
lowerCamelCase_ = Stack()
lowerCamelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCamelCase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCamelCase__ )
elif i == ")":
# RULE 4
lowerCamelCase_ = operator_stack.peek()
operator_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operand_stack.peek()
operand_stack.pop()
lowerCamelCase_ = operators[opr](lowerCamelCase__ , lowerCamelCase__ )
operand_stack.push(lowerCamelCase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__A ='''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 47
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
__A =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for attribute in key.split("." ):
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ )
if "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = "weight"
else:
lowerCamelCase_ = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = full_name.split("conv_layers." )[-1]
lowerCamelCase_ = name.split("." )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
if config_path is not None:
lowerCamelCase_ = UniSpeechSatConfig.from_pretrained(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechSatConfig()
lowerCamelCase_ = ""
if is_finetuned:
lowerCamelCase_ = UniSpeechSatForCTC(lowerCamelCase__ )
else:
lowerCamelCase_ = UniSpeechSatForPreTraining(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__A =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 47
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_lowerCamelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : str = AudioClassificationPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
# test with a raw waveform
__A : Tuple = np.zeros((3_4000,) )
__A : str = np.zeros((1_4000,) )
return audio_classifier, [audioa, audio]
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : List[str] = examples
__A : Tuple = audio_classifier(__SCREAMING_SNAKE_CASE )
# by default a model is initialized with num_labels=2
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
__A : int = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=1 )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
self.run_torchaudio(__SCREAMING_SNAKE_CASE )
@require_torchaudio
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
import datasets
# test with a local file
__A : Optional[int] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
__A : Any = dataset[0]['''audio''']['''array''']
__A : Tuple = audio_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = '''anton-l/wav2vec2-random-tiny-classifier'''
__A : str = pipeline('''audio-classification''' , model=__SCREAMING_SNAKE_CASE )
__A : Union[str, Any] = np.ones((8000,) )
__A : List[Any] = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
__A : Dict = [
{'''score''': 0.0_8_4_2, '''label''': '''no'''},
{'''score''': 0.0_8_3_8, '''label''': '''up'''},
{'''score''': 0.0_8_3_7, '''label''': '''go'''},
{'''score''': 0.0_8_3_4, '''label''': '''right'''},
]
__A : Optional[int] = [
{'''score''': 0.0_8_4_5, '''label''': '''stop'''},
{'''score''': 0.0_8_4_4, '''label''': '''on'''},
{'''score''': 0.0_8_4_1, '''label''': '''right'''},
{'''score''': 0.0_8_3_4, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
__A : List[str] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
__A : Dict = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
self.assertIn(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def UpperCamelCase__( self ):
'''simple docstring'''
import datasets
__A : Any = '''superb/wav2vec2-base-superb-ks'''
__A : Optional[Any] = pipeline('''audio-classification''' , model=__SCREAMING_SNAKE_CASE )
__A : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
__A : int = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
__A : Optional[Any] = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=3 ) , [
{'''score''': 0.9_8_1, '''label''': '''go'''},
{'''score''': 0.0_0_7, '''label''': '''up'''},
{'''score''': 0.0_0_6, '''label''': '''_unknown_'''},
{'''score''': 0.0_0_1, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def UpperCamelCase__( self ):
'''simple docstring'''
pass
| 179
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_text_model'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=5_02_44 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Any = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = d_kv
lowercase_ : List[str] = d_ff
lowercase_ : List[str] = num_layers
lowercase_ : Optional[Any] = num_heads
lowercase_ : Union[str, Any] = relative_attention_num_buckets
lowercase_ : Optional[int] = relative_attention_max_distance
lowercase_ : Union[str, Any] = dropout_rate
lowercase_ : Dict = layer_norm_epsilon
lowercase_ : Dict = initializer_factor
lowercase_ : List[Any] = use_cache
lowercase_ : Optional[int] = eos_token_id
lowercase_ : Optional[int] = decoder_start_token_id
# for backwards compatibility
lowercase_ : Any = dense_act_fn
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Optional[int] = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct_vision_model'''
def __init__( self , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=7_68 , __SCREAMING_SNAKE_CASE=20_48 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE="gelu_new" , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=1E-1_0 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=40_96 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_28 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = hidden_size
lowercase_ : Any = patch_embed_hidden_size
lowercase_ : List[Any] = d_ff
lowercase_ : Dict = dropout_rate
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : int = initializer_range
lowercase_ : Dict = initializer_factor
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : str = dense_act_fn
lowercase_ : Dict = seq_len
lowercase_ : List[Any] = relative_attention_num_buckets
lowercase_ : int = relative_attention_max_distance
lowercase_ : Optional[int] = d_kv
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : str = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
lowercase_ : Optional[int] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( lowerCamelCase_ ):
lowerCAmelCase_ = '''pix2struct'''
lowerCAmelCase_ = True
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text_config is None:
lowercase_ : Optional[Any] = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
lowercase_ : Dict = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
lowercase_ : str = PixaStructTextConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = PixaStructVisionConfig(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.text_config.decoder_start_token_id
lowercase_ : Union[str, Any] = self.text_config.pad_token_id
lowercase_ : Union[str, Any] = self.text_config.eos_token_id
lowercase_ : int = initializer_factor
lowercase_ : Any = initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : str = self.initializer_range
lowercase_ : int = is_vqa
@classmethod
def _snake_case ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = copy.deepcopy(self.__dict__ )
lowercase_ : Any = self.text_config.to_dict()
lowercase_ : Optional[Any] = self.vision_config.to_dict()
lowercase_ : Optional[int] = self.__class__.model_type
return output
| 93
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _snake_case ( a__ ):
snake_case__ = "Salesforce/blip-image-captioning-base"
snake_case__ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case__ = "image_captioner"
snake_case__ = AutoModelForVisionaSeq
snake_case__ = ["image"]
snake_case__ = ["text"]
def __init__( self : Any , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ):
requires_backends(self , ["vision"] )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : "Image" ):
return self.pre_processor(images=UpperCAmelCase , return_tensors="pt" )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : int ):
return self.model.generate(**UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Dict ):
return self.pre_processor.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )[0].strip()
| 64
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _snake_case ( a__ ):
snake_case__ = "bert-generation"
def __init__( self : Optional[int] , UpperCAmelCase : Dict=50358 , UpperCAmelCase : int=1024 , UpperCAmelCase : Optional[int]=24 , UpperCAmelCase : str=16 , UpperCAmelCase : str=4096 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : int=1E-12 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : int=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Union[str, Any]="absolute" , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : int = hidden_act
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Union[str, Any] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : Optional[Any] = use_cache
| 64
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict =logging.get_logger(__name__)
_A : List[Any] ={
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _lowercase ( _lowercase ):
a = """vivit"""
def __init__( self: List[str] , UpperCamelCase__: List[Any]=224 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: int=[2, 16, 16] , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: Dict=768 , UpperCamelCase__: Optional[int]=12 , UpperCamelCase__: Tuple=12 , UpperCamelCase__: List[str]=3_072 , UpperCamelCase__: Optional[int]="gelu_fast" , UpperCamelCase__: Union[str, Any]=0.0 , UpperCamelCase__: Any=0.0 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: Optional[Any]=1e-06 , UpperCamelCase__: List[str]=True , **UpperCamelCase__: List[Any] , ):
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : Dict = num_hidden_layers
lowerCamelCase__ : List[str] = num_attention_heads
lowerCamelCase__ : Any = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Tuple = attention_probs_dropout_prob
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Optional[int] = layer_norm_eps
lowerCamelCase__ : int = image_size
lowerCamelCase__ : str = num_frames
lowerCamelCase__ : Optional[Any] = tubelet_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Any = qkv_bias
super().__init__(**UpperCamelCase__ )
| 41
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase : Optional[int] = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowercase : Optional[Any] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[Any] = list(state_dict.keys() )
for name in state_dict_keys:
A : str = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('''emb.''' ):
A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ )
# ffn -> feed_forward
A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
A : List[Any] = '''rwkv.''' + name
A : Dict = weight
return state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ):
'''simple docstring'''
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
A : int = 5_0277
A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
A : Any = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A : List[str] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
A : Any = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ )
A : Tuple = torch.load(snake_case__ , map_location='''cpu''' )
A : List[Any] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
A, A : List[str] = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
A : Dict = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
A : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
A : int = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowercase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 3
| 0
|
"""simple docstring"""
class snake_case :
def __init__( self : Union[str, Any] , A : Any , A : str ):
'''simple docstring'''
a : int = name
a : List[Any] = val
def __str__( self : List[str] ):
'''simple docstring'''
return F'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self : Tuple , A : List[str] ):
'''simple docstring'''
return self.val < other.val
class snake_case :
def __init__( self : List[str] , A : Optional[int] ):
'''simple docstring'''
a : Tuple = {}
a : Any = {}
a : Any = self.build_heap(A )
def __getitem__( self : Optional[Any] , A : List[Any] ):
'''simple docstring'''
return self.get_value(A )
def lowerCamelCase__ ( self : Tuple , A : List[str] ):
'''simple docstring'''
return (idx - 1) // 2
def lowerCamelCase__ ( self : int , A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 1
def lowerCamelCase__ ( self : Dict , A : Optional[int] ):
'''simple docstring'''
return idx * 2 + 2
def lowerCamelCase__ ( self : List[str] , A : List[Any] ):
'''simple docstring'''
return self.heap_dict[key]
def lowerCamelCase__ ( self : str , A : List[str] ):
'''simple docstring'''
a : Tuple = len(A ) - 1
a : Any = self.get_parent_idx(A )
for idx, i in enumerate(A ):
a : Tuple = idx
a : List[Any] = i.val
for i in range(A , -1 , -1 ):
self.sift_down(A , A )
return array
def lowerCamelCase__ ( self : Tuple , A : List[Any] , A : List[Any] ):
'''simple docstring'''
while True:
a : Optional[Any] = self.get_left_child_idx(A ) # noqa: E741
a : Tuple = self.get_right_child_idx(A )
a : str = idx
if l < len(A ) and array[l] < array[idx]:
a : List[str] = l
if r < len(A ) and array[r] < array[smallest]:
a : Any = r
if smallest != idx:
a, a : Optional[Any] = array[smallest], array[idx]
(
(
a
), (
a
),
) : List[str] = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
a : int = smallest
else:
break
def lowerCamelCase__ ( self : Any , A : Optional[Any] ):
'''simple docstring'''
a : Any = self.get_parent_idx(A )
while p >= 0 and self.heap[p] > self.heap[idx]:
a, a : int = self.heap[idx], self.heap[p]
a, a : int = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
a : int = p
a : Optional[Any] = self.get_parent_idx(A )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.heap[0]
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a, a : Optional[int] = self.heap[-1], self.heap[0]
a, a : str = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
a : Dict = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCamelCase__ ( self : Optional[int] , A : Union[str, Any] ):
'''simple docstring'''
self.heap.append(A )
a : int = len(self.heap ) - 1
a : Tuple = node.val
self.sift_up(len(self.heap ) - 1 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.heap ) == 0
def lowerCamelCase__ ( self : Tuple , A : List[str] , A : Dict ):
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
a : Union[str, Any] = new_value
a : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
_UpperCamelCase : str = Node('R', -1)
_UpperCamelCase : Optional[int] = Node('B', 6)
_UpperCamelCase : str = Node('A', 3)
_UpperCamelCase : Optional[Any] = Node('X', 1)
_UpperCamelCase : Union[str, Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_UpperCamelCase : str = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 186
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_UpperCamelCase : Dict = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class snake_case ( UpperCAmelCase ):
__magic_name__ = field(default=UpperCAmelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
__magic_name__ = field(
default=UpperCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
__magic_name__ = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
__magic_name__ = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
__magic_name__ = field(
default=UpperCAmelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(A , A ):
a : str = v.to_dict()
return d
| 186
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.