code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
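
# A minimal sketch of the mocking pattern described in the comment above; it is an
# illustration added here, not part of the original test suite. The stub's signature is
# assumed to mirror the example scripts' `get_dataloaders(accelerator, batch_size)`.
#
#     def mocked_dataloaders(accelerator, batch_size=16):
#         ...  # return tiny train/eval DataLoaders built from test_samples/MRPC
#
#     @mock.patch("checkpointing.get_dataloaders", mocked_dataloaders)
#     def test_script_runs(self):
#         ...
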
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 331 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
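
# Editor's illustration (not in the original file): `attribute_map` above lets the
# generic config names resolve to CodeGen's native fields.
_demo_config = CodeGenConfig()
assert _demo_config.hidden_size == _demo_config.n_embd == 4096
assert _demo_config.num_hidden_layers == _demo_config.n_layer == 28
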
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
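    # Shape note (editor's illustration) using this file's CodeGen defaults (n_head = 16,
    # n_embd = 4096) with an assumed batch of 2 and seq_length of 8: the dummy past length
    # is 8 + 2 = 10 and head_dim is 4096 // 16 = 256, so each past key/value tensor above
    # has shape (2, 16, 10, 256), one (key, value) pair for each of the 28 layers.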
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 284 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
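
# Example invocation sketch (editor's illustration; the script name and path below are
# placeholders, not taken from this file):
#
#     python bertarize.py \
#         --pruning_method sigmoied_threshold \
#         --threshold 0.1 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model
#
# When --target_model_path is omitted, the pruned weights are written next to the input
# folder under `bertarized_<basename>`, per the fallback above.
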
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()

    main(args)
| 284 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
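# Editor's illustration with a hypothetical shard file name: Megatron-DeepSpeed numbers
# transformer blocks starting at layer_03, so subtracting 3 yields the transformers index.
assert layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt") == "h.1.input_layernorm.weight"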
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
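# Editor's illustration: quick checks for the helper above (values follow directly from
# the bit-width parse; torch.bool is accounted as one bit per element).
assert get_dtype_size(torch.float16) == 2
assert get_dtype_size(torch.int64) == 8
assert get_dtype_size(torch.bool) == 1 / 8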
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

    # Save pytorch-model
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
    if config.torch_dtype is not None:
        model = model.to(config.torch_dtype)
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 4 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
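# Editor's illustration: the factory returns a fresh activation module per call, and any
# unknown name raises ValueError.
assert isinstance(get_activation("silu"), nn.SiLU)
assert isinstance(get_activation("gelu"), nn.GELU)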
| 138 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 357 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
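# Editor's illustration: with the default cosine transform the schedule stays capped at
# max_beta and has one beta per diffusion timestep.
_demo_betas = betas_for_alpha_bar(10)
assert _demo_betas.shape == (10,)
assert float(_demo_betas.max()) <= 0.999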
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
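    # Worked micro-example of the interpolation above (editor's illustration, not from the
    # original file): with an ascending log-sigma grid [-1.0, 0.0, 1.0] and a query sigma
    # of exp(0.5) (log sigma = 0.5), the bracketing indices are low_idx = 1, high_idx = 2,
    # the weight is w = (0.0 - 0.5) / (0.0 - 1.0) = 0.5, and the fractional timestep is
    # t = (1 - 0.5) * 1 + 0.5 * 2 = 1.5, i.e. halfway between timesteps 1 and 2.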
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 29 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 13 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def _SCREAMING_SNAKE_CASE ( self : Any , model : List[str] , tokenizer : List[str] , processor : Dict):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def _SCREAMING_SNAKE_CASE ( self : str , generator : Dict , _ : Any):
        outputs = generator("Something there")
        self.assertEqual(outputs , [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
        outputs = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=True)
        self.assertEqual(
            outputs , [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ] , )
        outputs = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=True)
        self.assertEqual(
            outputs , [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ] , )
        with self.assertRaises(ValueError):
            generator(4)
@require_torch
    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there" , do_sample=False)
        self.assertEqual(outputs , [{"generated_text": ""}])
        num_return_sequences = 3
        outputs = generator(
            "Something there" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs , target_outputs)
        outputs = generator("This is a test" , do_sample=True , num_return_sequences=2 , return_tensors=True)
        self.assertEqual(
            outputs , [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ] , )
@require_tf
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there" , do_sample=False)
        self.assertEqual(outputs , [{"generated_text": ""}])
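# A standalone usage sketch of the same pipeline (illustrative; the tiny random
# checkpoint above produces meaningless text, so the printed output is not stable):
# from transformers import pipeline
# generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random")
# print(generator("Something there" , do_sample=False , max_length=20))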
| 13 | 1 |
def power( base : int , exponent : int )->float:
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F"{base} to the power of {exponent} is {result}")
| 39 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = '''MCTCTFeatureExtractor'''
__SCREAMING_SNAKE_CASE = '''AutoTokenizer'''
    def __init__( self,feature_extractor,tokenizer ):
        super().__init__(feature_extractor,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self,*args,**kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args,**kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''',None )
        sampling_rate = kwargs.pop('''sampling_rate''',None )
        text = kwargs.pop('''text''',None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio,*args,sampling_rate=sampling_rate,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text,**kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode( self,*args,**kwargs ):
        return self.tokenizer.batch_decode(*args,**kwargs )
    def pad( self,*args,**kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args,**kwargs )
        input_features = kwargs.pop('''input_features''',None )
        labels = kwargs.pop('''labels''',None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features,*args,**kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels,**kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['''labels'''] = labels['''input_ids''']
            return input_features
    def decode( self,*args,**kwargs ):
        return self.tokenizer.decode(*args,**kwargs )
    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
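# Usage sketch (the `processor`, `waveform` and `transcription` names are illustrative).
# The deprecated pattern this class still supports:
#     with processor.as_target_processor():
#         labels = processor(transcription).input_ids
# and the recommended replacement, passing audio and text in a single call:
#     batch = processor(audio=waveform, sampling_rate=16_000, text=transcription)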
| 39 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self : List[str] ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self : Dict , **kwargs : Dict ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **kwargs )
    def get_rust_tokenizer( self : str , **kwargs : Any ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **kwargs )
    def get_image_processor( self : Tuple , **kwargs : List[Any] ):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self : str ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : str ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _A ( self : str ):
UpperCamelCase :List[Any] = self.get_tokenizer()
UpperCamelCase :List[Any] = self.get_rust_tokenizer()
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :Any = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase :int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase :str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCamelCase )
def _A ( self : List[str] ):
UpperCamelCase :Optional[int] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase :str = self.get_image_processor(do_normalize=__lowerCamelCase )
UpperCamelCase :str = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _A ( self : Union[str, Any] ):
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :Dict = self.get_tokenizer()
UpperCamelCase :Optional[int] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Dict = self.prepare_image_inputs()
UpperCamelCase :List[Any] = image_processor(__lowerCamelCase , return_tensors="""np""" )
UpperCamelCase :Dict = processor(images=__lowerCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self : Dict ):
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :List[Any] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[Any] = """lower newer"""
UpperCamelCase :List[str] = processor(text=__lowerCamelCase , return_tensors="""np""" )
UpperCamelCase :List[str] = tokenizer(__lowerCamelCase , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _A ( self : Optional[int] ):
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :Any = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :Optional[int] = """lower newer"""
UpperCamelCase :Tuple = self.prepare_image_inputs()
UpperCamelCase :int = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Any ):
UpperCamelCase :Optional[Any] = """google/owlvit-base-patch32"""
UpperCamelCase :List[Any] = OwlViTProcessor.from_pretrained(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = ["""cat""", """nasa badge"""]
UpperCamelCase :str = processor(text=__lowerCamelCase )
UpperCamelCase :Optional[Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : List[str] ):
UpperCamelCase :Dict = """google/owlvit-base-patch32"""
UpperCamelCase :Dict = OwlViTProcessor.from_pretrained(__lowerCamelCase )
UpperCamelCase :Tuple = [["""cat""", """nasa badge"""], ["""person"""]]
UpperCamelCase :Union[str, Any] = processor(text=__lowerCamelCase )
UpperCamelCase :int = 16
UpperCamelCase :int = len(__lowerCamelCase )
UpperCamelCase :Dict = max([len(__lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Any ):
UpperCamelCase :Optional[Any] = """google/owlvit-base-patch32"""
UpperCamelCase :List[Any] = OwlViTProcessor.from_pretrained(__lowerCamelCase )
UpperCamelCase :Optional[int] = ["""cat""", """nasa badge"""]
UpperCamelCase :Union[str, Any] = processor(text=__lowerCamelCase )
UpperCamelCase :str = 16
UpperCamelCase :Optional[int] = inputs["""input_ids"""]
UpperCamelCase :str = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _A ( self : str ):
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Optional[int] = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(images=__lowerCamelCase , query_images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _A ( self : Dict ):
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :str = OwlViTProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
UpperCamelCase :str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Any = processor.batch_decode(__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
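# A minimal end-to-end sketch mirroring the tests above (checkpoint name real,
# the `image` variable illustrative):
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")
# print(inputs.keys())  # input_ids, attention_mask, pixel_values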
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 74 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset( key , offset , original_name , new_name ) -> Optional[Any]:
    """simple docstring"""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
    return key
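# Worked example (hypothetical key): with key="network.1.3.mlp.fc1.weight", offset=1,
# original_name="mlp.fc1" and new_name="output.conv1", the indices resolve to
# orig_block_num=1 and layer_num=3, so the key becomes
# "network.block.0.3.output.conv1.weight".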
def rename_keys( state_dict ) -> Dict:
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , f"""patch_embeddings.{total_embed_found}.""" )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img( ) -> Union[str, Any]:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ) -> Any:
    """simple docstring"""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)
    # set config attributes (the variable name `idalabel` is kept from the source dump)
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1E-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
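# Example invocation (paths hypothetical):
#   python convert_poolformer_checkpoint.py --model_name poolformer_s24 \
#       --checkpoint_path /path/to/poolformer_s24.pth --pytorch_dump_folder_path ./poolformer_s24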
| 44 |
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003
def rabin_karp( pattern , text ) -> bool:
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
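# Worked example of the rolling update (illustrative numbers): with pattern length 2,
# modulus_power == alphabet_size == 256, so hash("ab") == ord('a')*256 + ord('b');
# sliding from "ab" to "bc" subtracts ord('a')*256, multiplies by 256, and adds
# ord('c'), yielding ord('b')*256 + ord('c') == hash("bc") (all modulo `modulus`).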
def test_rabin_karp( ) -> None:
    """simple docstring"""
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern , text )
    pattern = '''Lue'''
    assert not rabin_karp(pattern , text )
    print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 44 | 1 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase_ ( ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = 9, 14 # noqa: F841
lowerCAmelCase_ : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 224 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 208 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset : datasets.Dataset , **kwargs : Optional[int]):
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset : datasets.Dataset , **kwargs : int):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow') , features , num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=True)
        def tokenize(examples : str):
            return tokenizer(examples['text'])
        # Timing keys below are reconstructed descriptively from the calls; the
        # obfuscated dump dropped the original dictionary keys.
        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset , batched=True)
        times['map no-op batched'] = map(dataset , function=lambda x: None , batched=True)
        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset , function=lambda x: None , batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset , function=lambda x: None , batched=True)
        with dataset.formatted_as(type='torch' , columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset , function=lambda x: None , batched=True)
        with dataset.formatted_as(type='tensorflow' , columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset , function=lambda x: None , batched=True)
        times['map fast-tokenizer batched'] = map(dataset , function=tokenize , batched=True)
        times['filter'] = filter(dataset)
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 251 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[int] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = """deberta-v2"""
    def __init__( self , vocab_size=128100 , hidden_size=1536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , )-> Any:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class SCREAMING_SNAKE_CASE__ ( snake_case_):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
return 12
    def UpperCAmelCase_ ( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , )-> Mapping[str, Any]:
        '''simple docstring'''
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
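# A minimal export sketch using a config like the one above (output directory
# hypothetical; assumes the legacy `transformers.onnx` exporter available in this
# version of the library):
#   python -m transformers.onnx --model=microsoft/deberta-v2-xlarge onnx/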
| 251 | 1 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
    def __init__( self : Union[str, Any] , n : int ) -> Union[str, Any]:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self : Any ) -> int:
        return self.size
    def is_empty( self : Dict ) -> bool:
        return self.size == 0
    def first( self : str ):
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self : List[Any] , data : Optional[Any] ) -> Union[str, Any]:
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self : List[str] ):
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 152 |
'''simple docstring'''
from PIL import Image
def change_brightness( img : Image , level : float ):
    '''simple docstring'''
    def brightness(c : int ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 1_0_0)
        brigt_img.save('image_data/lena_brightness.png', format='png')
| 152 | 1 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class __A:
"""simple docstring"""
    backend_name = None
@experimental
def parallel_map( function : int , iterable : List[str] , num_proc : Optional[int] , types : List[str] , disable_tqdm : List[str] , desc : Optional[Any] , single_map_nested_func : Union[str, Any] ):
    '''simple docstring'''
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
    return _map_with_joblib(function , iterable , num_proc , types , disable_tqdm , desc , single_map_nested_func )
def _map_with_multiprocessing_pool( function : Union[str, Any] , iterable : Tuple , num_proc : List[Any] , types : List[Any] , disable_tqdm : Dict , desc : Tuple , single_map_nested_func : int ):
    '''simple docstring'''
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        start = div * index + min(index , mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable )}, "
            f"length: {sum(len(i[1] ) for i in split_kwds )}" )
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}" )
    initargs , initializer = None, None
    if not disable_tqdm:
        initargs , initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc , initargs=initargs , initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func , split_kwds )
    logger.info(f"Finished {num_proc} processes" )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped )} objects" )
    return mapped
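# Worked example of the contiguous split above (illustrative numbers): with
# len(iterable) == 10 and num_proc == 3, div == 3 and mod == 1, so the slices are
# [0:4], [4:7] and [7:10] -- the first `mod` workers each take one extra item.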
def _map_with_joblib( function : str , iterable : Optional[int] , num_proc : str , types : Optional[int] , disable_tqdm : Optional[Any] , desc : Any , single_map_nested_func : Optional[int] ):
    '''simple docstring'''
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend( backend_name : str ):
    '''simple docstring'''
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 178 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __A:
"""simple docstring"""
@staticmethod
        def UpperCAmelCase_ (*args , **kwargs ):
pass
def hashimage( image : Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask : Image ):
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def UpperCAmelCase_ (self , model , tokenizer , image_processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=image_processor )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def UpperCAmelCase_ (self , mask_generator , examples ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def UpperCAmelCase_ (self ):
pass
@slow
@require_torch
    def UpperCAmelCase_ (self ):
        image_segmenter = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_56 )
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_outupt += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def UpperCAmelCase_ (self ):
        model_id = """facebook/sam-vit-huge"""
        image_segmenter = pipeline("""mask-generation""" , model=model_id )
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_56 )
        # Shortening by hashing
        new_outupt = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_outupt += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_outupt , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
] , )
| 178 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A :
    def __init__( self , num_of_nodes ) -> None:
        '''simple docstring'''
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node , v_node , weight ) -> None:
        '''simple docstring'''
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ) -> int:
        '''simple docstring'''
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ) -> None:
        '''simple docstring'''
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ) -> None:
        '''simple docstring'''
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        '''simple docstring'''
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def _A ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
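# A usage sketch (graph and weights illustrative; the class is named `A` in this listing):
# g = A(4)
# g.add_edge(0, 1, 10); g.add_edge(0, 2, 6); g.add_edge(0, 3, 5)
# g.add_edge(1, 3, 15); g.add_edge(2, 3, 4)
# g.boruvka()  # picks edges 2-3 (4), 0-3 (5) and 0-1 (10); total MST weight 19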
| 164 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class A ( __UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = """MCTCTFeatureExtractor"""
lowerCamelCase : Dict = """AutoTokenizer"""
    def __init__( self , feature_extractor , tokenizer ) -> Any:
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , *args , **kwargs ) -> int:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def decode( self , *args , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ) -> Any:
        '''simple docstring'''
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 164 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
    def _get_uniform_logits( self , batch_size , length )-> jnp.ndarray:
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def _snake_case ( self )-> Dict:
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def _snake_case ( self )-> Any:
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
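    # Added illustration (not part of the original suite): nucleus (top-p) filtering
    # keeps the smallest set of tokens whose cumulative probability reaches `top_p`
    # and masks everything else; the probabilities below are exact binary fractions
    # so the cutoff is unambiguous.
    def _top_p_intuition_example(self):
        probs = np.array([0.5, 0.25, 0.125, 0.125])
        order = np.argsort(probs)[::-1]
        cumulative = np.cumsum(probs[order])
        keep = order[: np.searchsorted(cumulative, 0.75) + 1]
        # with top_p=0.75 only the 0.5 and 0.25 tokens survive
        assert sorted(keep.tolist()) == [0, 1]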
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 49 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
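# Added usage sketch (dummy ids, illustrative only): the two methods above produce
# `[bos] seq [eos]` for a single sequence, pairs joined by a double eos, and all-zero
# token type ids for this model.
bos_token_id, eos_token_id = 1, 2
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]
single = [bos_token_id] + token_ids_0 + [eos_token_id]
pair = single + [eos_token_id] + token_ids_1 + [eos_token_id]
assert single == [1, 10, 11, 12, 2]
assert pair == [1, 10, 11, 12, 2, 2, 20, 21, 2]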
| 205 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa

set_seed(42)
MARIAN_MODEL = 'sshleifer/student_marian_en_ro_6_1'
MBART_TINY = 'sshleifer/tiny-mbart'

@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict)
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'], float)
            assert not math.isnan(float(last_step_stats['eval_loss'])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple')

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16')

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False)

    @unittest.skip('Requires an update of the env running those tests')
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False)
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
    @parameterized.expand(['base', 'low', 'high', 'mixed'])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data['extra_args_str'])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data['n_matches'])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False)
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1)
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, 'trainer_state.json')).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20)
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings,
            'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB')
        self.assertGreater(
            gpu_total_mem_diff, expected_savings,
            'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB')
        self.assertEqual(
            loss_orig, loss_bnb, f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}')
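    # Added for illustration: the worked arithmetic behind the ~150MB expectation above
    # (parameter counts are taken from the comment, not measured here). Adam keeps
    # 8 bytes of fp32 optimizer state per parameter, 8-bit BNB keeps ~2.
    def _bnb_expected_saving_mb(self):
        quantized_params = 25_000_000
        bytes_saved_per_param = 8 - 2
        return quantized_params * bytes_saved_per_param / 2**20  # ~143 MiB, i.e. roughly 150MB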
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = 'adafactor',
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        '''.split()
        args_predict = '\n --do_predict\n '.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f'--optim {optim}'.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ['run_translation.py'] + args
            with patch.object(sys, 'argv', testargs):
                main()
        return output_dir
| 205 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor

class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = 'silu',
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = 'linear',
        added_emb_type: Optional[str] = 'prd',
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
super().__init__()
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : List[str] = attention_head_dim
UpperCAmelCase_ : List[str] = num_attention_heads * attention_head_dim
UpperCAmelCase_ : List[Any] = additional_embeddings
UpperCAmelCase_ : Union[str, Any] = time_embed_dim or inner_dim
UpperCAmelCase_ : Tuple = embedding_proj_dim or embedding_dim
UpperCAmelCase_ : Dict = clip_embed_dim or embedding_dim
UpperCAmelCase_ : Dict = Timesteps(_snake_case ,_snake_case ,0 )
UpperCAmelCase_ : Optional[Any] = TimestepEmbedding(_snake_case ,_snake_case ,out_dim=_snake_case ,act_fn=_snake_case )
UpperCAmelCase_ : Tuple = nn.Linear(_snake_case ,_snake_case )
if embedding_proj_norm_type is None:
UpperCAmelCase_ : Any = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ : Optional[Any] = nn.LayerNorm(_snake_case )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ : List[str] = nn.Linear(_snake_case ,_snake_case )
if encoder_hid_proj_type is None:
UpperCAmelCase_ : List[Any] = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ : str = nn.Linear(_snake_case ,_snake_case )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ : Tuple = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_snake_case ) )
if added_emb_type == "prd":
UpperCAmelCase_ : int = nn.Parameter(torch.zeros(1 ,1 ,_snake_case ) )
elif added_emb_type is None:
UpperCAmelCase_ : Union[str, Any] = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
_snake_case ,_snake_case ,_snake_case ,dropout=_snake_case ,activation_fn="gelu" ,attention_bias=_snake_case ,)
for d in range(_snake_case )
] )
if norm_in_type == "layer":
UpperCAmelCase_ : Dict = nn.LayerNorm(_snake_case )
elif norm_in_type is None:
UpperCAmelCase_ : List[str] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ : int = nn.LayerNorm(_snake_case )
UpperCAmelCase_ : int = nn.Linear(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ : List[str] = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" ,_snake_case ,persistent=_snake_case )
UpperCAmelCase_ : Any = nn.Parameter(torch.zeros(1 ,_snake_case ) )
UpperCAmelCase_ : str = nn.Parameter(torch.zeros(1 ,_snake_case ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}
def fn_recursive_add_processors(_snake_case ,_snake_case ,_snake_case ):
if hasattr(_snake_case ,"set_processor" ):
UpperCAmelCase_ : Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' ,_snake_case ,_snake_case )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_snake_case ,_snake_case ,_snake_case )
return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
if isinstance(_snake_case ,_snake_case ) and len(_snake_case ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_snake_case )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_snake_case ,_snake_case ,_snake_case ):
if hasattr(_snake_case ,"set_processor" ):
if not isinstance(_snake_case ,_snake_case ):
module.set_processor(_snake_case )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,_snake_case ,_snake_case )
for name, module in self.named_children():
fn_recursive_attn_processor(_snake_case ,_snake_case ,_snake_case )
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
UpperCAmelCase_ : str = hidden_states.shape[0]
UpperCAmelCase_ : Optional[Any] = timestep
if not torch.is_tensor(_snake_case ):
UpperCAmelCase_ : Union[str, Any] = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_snake_case ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : Union[str, Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ : Optional[Any] = timesteps * torch.ones(_snake_case ,dtype=timesteps.dtype ,device=timesteps.device )
UpperCAmelCase_ : int = self.time_proj(_snake_case )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ : str = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ : Optional[int] = self.time_embedding(_snake_case )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ : Optional[int] = self.embedding_proj_norm(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.embedding_proj(_snake_case )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ : Union[str, Any] = self.encoder_hidden_states_proj(_snake_case )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ : str = self.proj_in(_snake_case )
UpperCAmelCase_ : Dict = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_snake_case )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ : Dict = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ : Optional[int] = hidden_states[:, None, :]
UpperCAmelCase_ : Tuple = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ : Tuple = self.prd_embedding.to(hidden_states.dtype ).expand(_snake_case ,-1 ,-1 )
additional_embeds.append(_snake_case )
UpperCAmelCase_ : str = torch.cat(
_snake_case ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ : Tuple = F.pad(
_snake_case ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
UpperCAmelCase_ : List[str] = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ : int = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ : Optional[int] = F.pad(_snake_case ,(0, self.additional_embeddings) ,value=0.0 )
UpperCAmelCase_ : Any = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ : Union[str, Any] = self.norm_in(_snake_case )
for block in self.transformer_blocks:
UpperCAmelCase_ : str = block(_snake_case ,attention_mask=_snake_case )
UpperCAmelCase_ : str = self.norm_out(_snake_case )
if self.prd_embedding is not None:
UpperCAmelCase_ : Tuple = hidden_states[:, -1]
else:
UpperCAmelCase_ : Any = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ : Tuple = self.proj_to_clip_embeddings(_snake_case )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_snake_case )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
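# Added for intuition: `post_process_latents` above just undoes the CLIP-statistics
# normalization, i.e. x * std + mean (the numbers below are placeholders, not the
# model's real statistics).
import numpy as np

clip_mean, clip_std = 0.25, 1.5
normalized_latents = np.array([-1.0, 0.0, 2.0])
restored = normalized_latents * clip_std + clip_mean
assert restored.tolist() == [-1.25, 0.25, 3.25]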
| 67 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
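# Added for intuition: a tiny PyTorch sketch of the split performed above.
# nn.MultiheadAttention stores one fused `in_proj_weight` of shape (3 * hidden, hidden);
# consecutive 256-row slices are the query, key and value projections for this model.
def _fused_qkv_split_example():
    hidden = 256
    in_proj_weight = torch.randn(3 * hidden, hidden)
    q_w = in_proj_weight[:hidden, :]
    k_w = in_proj_weight[hidden : 2 * hidden, :]
    v_w = in_proj_weight[-hidden:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)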
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2)
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 67 | 1 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]

def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
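# Added quick usage check for the two helpers above (the function names were restored
# from the surrounding comments; values are chosen so the output is easy to verify by hand):
if __name__ == "__main__":
    data = [2.0, 4.0, 6.0, 8.0]
    print(normalization(data))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(data))  # zero mean, unit sample standard deviation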
| 284 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='mecab')
        self.assertIsNotNone(tokenizer)
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite')
        except ModuleNotFoundError:
            return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic')
        except ModuleNotFoundError:
            return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='sudachi')
        self.assertIsNotNone(tokenizer)
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='A')
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='B')
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='C')
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='jumanpp')
        self.assertIsNotNone(tokenizer)
        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
__lowerCAmelCase = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 284 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4)) | 116 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error (MSE) is the average of the squared differences between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
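# Hedged illustration of the computation this metric wraps (the values match the
# doctests in the kwargs description above; not part of the metric itself):
#     mean_squared_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])                 # -> 0.375
#     mean_squared_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8], squared=False)  # -> 0.6123724356957945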
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
return {"mse": mse} | 116 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(k) for 2 <= k <= limit using a sieve
    (Project Euler 72: the number of reduced proper fractions with d <= limit)."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: apply phi(j) *= (1 - 1/i) to its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
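# Sanity check (illustrative, matching the Project Euler 72 statement):
# for limit = 8 the totients of 2..8 are 1, 2, 2, 4, 2, 6, 4, which sum to 21.
assert solution(8) == 21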
if __name__ == "__main__":
print(solution())
| 95 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Return the maximum profit attainable with a fractional (greedy) knapsack."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the element as used so duplicate ratios resolve to later indices
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
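# Worked example (standard fractional-knapsack numbers, added for illustration):
# ratios are [6, 5, 4]; the two best items fit whole (60 + 100), then 20 of the
# last item's 30 kg fit, adding 20/30 * 120 = 80, for a total of 240.0.
assert calc_profit([60, 100, 120], [10, 20, 30], 50) == 240.0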
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
__UpperCAmelCase = [int(x) for x in input("Input profits separated by spaces: ").split()]
__UpperCAmelCase = [int(x) for x in input("Input weights separated by spaces: ").split()]
__UpperCAmelCase = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 257 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 257 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
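    # Note (hedged): onnxruntime accepts providers either as plain strings or as
    # ("ProviderName", options_dict) tuples; "gpu_mem_limit" is given in bytes,
    # so the string above caps the CUDA arena at roughly 15 GB, as the comment says.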
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'A red cat sitting on a park bench'
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images
_UpperCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms(self):
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
_UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_UpperCAmelCase = 'A red cat sitting on a park bench'
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images
_UpperCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_UpperCAmelCase = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 39 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
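# Minimal usage sketch (illustrative; mirrors test_trie below):
#     root = TrieNode()
#     root.insert_many(["band", "bandana"])
#     root.find("band")     # True
#     root.find("ban")      # False -- only complete inserted words are leaves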
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
    assert test_trie()
def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
    main()
| 39 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 363 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
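# Minimal usage sketch (illustrative): with no arguments the config falls back
# to a ResNet backbone, as __init__ logs above.
#     config = UperNetConfig()
#     config.backbone_config.model_type  # -> "resnet"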
| 30 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Any = {'vocab_file': 'spiece.model'}
_a : Dict = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
_a : List[str] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
_a : str = '▁'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=100 , a__=None , a__ = None , a__=True , **a__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : List[Any] = [F"<extra_id_{i}>" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : Union[str, Any] = len(set(filter(lambda a__ : bool("""extra_id""" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
_lowerCAmelCase : str = legacy
_lowerCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : Any = extra_ids
_lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def __A ( a__ , a__ , a__ ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCAmelCase : List[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , a__ , )
return max_model_length
@property
def __A ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def __A ( self ):
return list(
set(filter(lambda a__ : bool(re.search(r"""<extra_id_\d+>""" , a__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self ):
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
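    # Illustration (hedged): with the default extra_ids=100 the sentinel tokens are
    # <extra_id_0> ... <extra_id_99>, appended after the SentencePiece vocab in
    # reverse order, so _convert_token_to_id maps <extra_id_0> to vocab_size - 1,
    # <extra_id_1> to vocab_size - 2, and so on (see the regex match further below).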
def __A ( self , a__ ):
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : Optional[Any] = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self ):
_lowerCAmelCase : Tuple = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self , a__ ):
_lowerCAmelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Any = {}
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , a__ , **a__ ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_lowerCAmelCase : Union[str, Any] = SPIECE_UNDERLINE + text.replace(a__ , """ """ )
return super().tokenize(a__ , **a__ )
def __A ( self , a__ , **a__ ):
if not self.legacy:
_lowerCAmelCase : int = text.startswith(a__ )
if is_first:
_lowerCAmelCase : Tuple = text[1:]
_lowerCAmelCase : List[Any] = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(a__ ):
_lowerCAmelCase : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def __A ( self , a__ ):
if token.startswith("""<extra_id_""" ):
_lowerCAmelCase : Any = re.match(r"""<extra_id_(\d+)>""" , a__ )
_lowerCAmelCase : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def __A ( self , a__ ):
if index < self.sp_model.get_piece_size():
_lowerCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(a__ )
else:
_lowerCAmelCase : Union[str, Any] = F"<extra_id_{self.vocab_size - 1 - index}>"
return token
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[str] = """"""
_lowerCAmelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Union[str, Any] = []
else:
current_sub_tokens.append(a__ )
_lowerCAmelCase : Tuple = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 44 | """simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
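# Hedged usage sketch for the pipeline above (the checkpoint name is an example,
# not taken from this file):
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("cat.png", max_new_tokens=20)  # -> [{"generated_text": "..."}]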
| 44 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
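# Quick sanity check (illustrative, not in the original): a blinker oscillates
# with period 2, so two generations return it to its starting state.
assert new_generation(new_generation(BLINKER)) == BLINKER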
if __name__ == "__main__":
images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 294 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
'''simple docstring'''
__a : int = parent
__a : Union[str, Any] = batch_size
__a : Optional[int] = seq_length
__a : List[str] = is_training
__a : Any = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Any = use_labels
__a : List[str] = vocab_size
__a : str = hidden_size
__a : List[str] = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : Union[str, Any] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : Dict = type_vocab_size
__a : Any = type_sequence_label_size
__a : Dict = initializer_range
__a : Optional[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Union[str, Any] = relative_attention
__a : List[str] = position_biased_input
__a : List[Any] = pos_att_type
__a : Tuple = scope
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[Any] = None
if self.use_input_mask:
__a : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__a : Any = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Optional[int] = None
__a : int = None
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__a : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : List[Any] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__a : Any = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__a : str = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__a : str = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__a : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 294 | 1 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd(x, y) == gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple, via lcm(x, y) == x * y / gcd(x, y)."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
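# e.g. solution(10) == 2520, the smallest number divisible by each of 1 through 10.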
if __name__ == "__main__":
print(F"""{solution() = }""")
| 251 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"{prefix}.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]

    for w in ["weight", "bias"]:
        compressed_sd[f"{prefix}.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.query.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.key.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.self.value.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.attention.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.intermediate.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.dense.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"{prefix}.encoder.layer.{std_idx}.output.LayerNorm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["cls.predictions.decoder.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["cls.predictions.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"cls.predictions.transform.dense.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"cls.predictions.transform.LayerNorm.{w}"] = state_dict[
                f"cls.predictions.transform.LayerNorm.{w}"
            ]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 251 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 358 |
def solution(pence: int = 200) -> int:
    """Count the number of ways `pence` pence can be made from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
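# A quick sanity check on a smaller target: solution(5) == 4, counting
# {5}, {2 + 2 + 1}, {2 + 1 + 1 + 1} and {1 + 1 + 1 + 1 + 1}.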
if __name__ == "__main__":
assert solution(200) == 73682
| 71 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    """Rename keys to the HF convention and partition the state dict into the decoder (LM)
    weights and the enc-dec projection weights, splitting fused qkv projections along the way."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 178 |
from __future__ import annotations
def CeilIndex(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[CeilIndex(tail, -1, length - 1, v[i])] = v[i]

    return length
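# e.g. longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6;
# one longest increasing subsequence is [2, 3, 7, 8, 10, 13].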
if __name__ == "__main__":
import doctest
doctest.testmod()
| 178 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
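# A minimal usage sketch (`accelerator`, `model`, `optimizer` and `ckpt_dir` are
# hypothetical names; assumes the Accelerator was configured with an FSDP plugin):
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
#   # ...later, to resume training:
#   load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)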
| 365 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 180 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
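    # For example (hypothetical numbers): with max_length=4, a batch whose
    # concatenated input_ids are [1, 2, ..., 10] has total_length truncated to 8 and
    # is regrouped into {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]], ...};
    # the trailing [9, 10] remainder is dropped.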
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 39 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters already in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate children for the new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
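# A worked example of the child-count heuristic in `select` (hypothetical numbers):
# a parent scored 0.26 yields int(0.26 * 100) + 1 = 27 candidate children, capped to
# 10; each crossover then contributes two mutated children to the next population.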
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve a random population until a perfect match for the target string is found."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 49 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 365 |
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`"warn"`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `"warn"`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 47 | 0 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count numbers below `limit` that do not produce a palindrome within 50 reverse-and-add iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
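# e.g. 47 + 74 == 121 reaches a palindrome after one iteration, so 47 is not a
# Lychrel candidate, whereas 196 never produces a palindrome within 50 iterations.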
if __name__ == "__main__":
print(f'{solution() = }')
| 67 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__UpperCAmelCase =True
except (ImportError, ModuleNotFoundError):
__UpperCAmelCase =False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 67 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 359 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
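# A usage sketch (hypothetical caller): deprecating a keyword argument that a
# function still accepts through **kwargs, recovering the legacy value if passed:
#
#   def run(*args, **kwargs):
#       scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)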
| 265 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
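# A minimal usage sketch (the values shown are the defaults, repeated for illustration):
#
#   config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=False)
#   # the config can then be passed to a ViT-MAE model class, e.g. ViTMAEModel(config)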
| 116 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
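# Note (a sketch of the standard lazy-import pattern, not part of the original file):
# with `_LazyModule`, framework-specific submodules are only imported on first
# attribute access, e.g.:
#
#     from transformers.models.transfo_xl import TransfoXLConfig  # cheap, config only
#     from transformers.models.transfo_xl import TransfoXLModel   # now pulls in torch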
| 116 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self : Optional[int] , lowercase_ : Dict , lowercase_ : List[str]=2 , lowercase_ : List[Any]=True , lowercase_ : List[str]=False , lowercase_ : Any=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : int=32 * 4 , lowercase_ : Dict=32 * 6 , lowercase_ : int=4 , lowercase_ : int=32 , ):
snake_case_ : Optional[Any] = parent
snake_case_ : str = batch_size
snake_case_ : Optional[int] = is_training
snake_case_ : Dict = use_auxiliary_loss
snake_case_ : Dict = num_queries
snake_case_ : List[Any] = num_channels
snake_case_ : Dict = min_size
snake_case_ : List[Any] = max_size
snake_case_ : int = num_labels
snake_case_ : str = mask_feature_size
def _snake_case ( self : List[str] ):
snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowercase_ )
snake_case_ : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_ )
snake_case_ : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_ ) > 0.5
).float()
snake_case_ : str = (torch.rand((self.batch_size, self.num_labels) , device=lowercase_ ) > 0.5).long()
snake_case_ : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self : Any ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _snake_case ( self : Any ):
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = self.prepare_config_and_inputs()
snake_case_ : int = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def _snake_case ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[Any] ):
snake_case_ : Optional[int] = output.encoder_hidden_states
snake_case_ : Optional[int] = output.pixel_decoder_hidden_states
snake_case_ : List[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , config.decoder_config.decoder_layers )
def _snake_case ( self : str , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : int=False ):
with torch.no_grad():
snake_case_ : int = MaskFormerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[Any] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
snake_case_ : Dict = model(lowercase_ , output_hidden_states=lowercase_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_ )
def _snake_case ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] ):
snake_case_ : Dict = MaskFormerForInstanceSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
def comm_check_on_output(lowercase_ : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ : Optional[int] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
snake_case_ : List[Any] = model(lowercase_ )
comm_check_on_output(lowercase_ )
snake_case_ : Union[str, Any] = model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
comm_check_on_output(lowercase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_lowerCAmelCase : Optional[int] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCAmelCase : List[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : List[Any] = False
def _snake_case ( self : int ):
snake_case_ : str = MaskFormerModelTester(self )
snake_case_ : str = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def _snake_case ( self : List[str] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Union[str, Any] ):
snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_ )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def _snake_case ( self : Dict ):
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def _snake_case ( self : Optional[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self : int ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self : int ):
pass
def _snake_case ( self : Optional[Any] ):
snake_case_, snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[int] = model_class(lowercase_ )
snake_case_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[Any] = [*signature.parameters.keys()]
snake_case_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
@slow
def _snake_case ( self : Dict ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case_ : Union[str, Any] = MaskFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _snake_case ( self : Optional[Any] ):
snake_case_ : Optional[int] = (self.model_tester.min_size,) * 2
snake_case_ : Dict = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowercase_ ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowercase_ ),
'''class_labels''': torch.zeros(2 , 10 , device=lowercase_ ).long(),
}
snake_case_ : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowercase_ )
snake_case_ : str = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self : int ):
snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def _snake_case ( self : str ):
snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(lowercase_ ).to(lowercase_ )
snake_case_ : int = model(**lowercase_ , output_attentions=lowercase_ )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ : Any = self.all_model_classes[1]
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
snake_case_ : Union[str, Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
snake_case_ : Dict = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ).loss
loss.backward()
def _snake_case ( self : Optional[int] ):
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ : Optional[int] = self.all_model_classes[1]
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case_ : Optional[int] = True
snake_case_ : List[str] = True
snake_case_ : Tuple = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
snake_case_ : int = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
snake_case_ : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ : List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
snake_case_ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ : List[str] = 1e-4
def prepare_img():
snake_case_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def _snake_case ( self : Tuple ):
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def _snake_case ( self : List[str] ):
snake_case_ : int = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowercase_ )
snake_case_ : List[str] = self.default_image_processor
snake_case_ : int = prepare_img()
snake_case_ : Optional[int] = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
snake_case_ : Optional[int] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**lowercase_ )
snake_case_ : Dict = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
snake_case_ : List[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
snake_case_ : Tuple = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def _snake_case ( self : Tuple ):
snake_case_ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowercase_ )
.eval()
)
snake_case_ : Union[str, Any] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Union[str, Any] = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
snake_case_ : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case_ : Optional[Any] = model(**lowercase_ )
# masks_queries_logits
snake_case_ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ : Tuple = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
snake_case_ : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
snake_case_ : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ : Union[str, Any] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def _snake_case ( self : Union[str, Any] ):
snake_case_ : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowercase_ )
.eval()
)
snake_case_ : List[str] = self.default_image_processor
snake_case_ : Optional[int] = prepare_img()
snake_case_ : Dict = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
snake_case_ : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1088) )
with torch.no_grad():
snake_case_ : int = model(**lowercase_ )
# masks_queries_logits
snake_case_ : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ : int = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
snake_case_ : Tuple = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
snake_case_ : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ : Tuple = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def _snake_case ( self : str ):
snake_case_ : int = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowercase_ )
.eval()
)
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : str = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
snake_case_ : str = inputs['''pixel_values'''].to(lowercase_ )
snake_case_ : Optional[Any] = [el.to(lowercase_ ) for el in inputs['''mask_labels''']]
snake_case_ : Union[str, Any] = [el.to(lowercase_ ) for el in inputs['''class_labels''']]
with torch.no_grad():
snake_case_ : str = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
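# Hedged usage note, not part of the original file: the @slow-decorated integration
# tests above are skipped by default and enabled through the RUN_SLOW environment
# variable, e.g. (the test path is an assumption about the local checkout layout):
#
#     RUN_SLOW=1 pytest tests/models/maskformer/ -k "IntegrationTest"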
| 155 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
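# Worked example: solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits; solution(1000) == 4782 is the classic Project Euler 25 answer.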
| 155 | 1 |
def reverse_words(input_str: str) -> str:
    '''simple docstring'''
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
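# Example: reverse_words("I love Python") returns "Python love I"; runs of
# whitespace collapse because str.split() with no separator drops empty fields.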
| 285 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
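# Example: sigmoid(np.array([0.0])) -> array([0.5]), and the SiLU/swish value
# sigmoid_linear_unit(np.array([0.0])) -> array([0.]), since x * sigmoid(x) is 0 at x = 0.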
| 285 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = True , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : Dict = size if size is not None else {"shortest_edge": 224}
_snake_case : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
_snake_case : Any = crop_size if crop_size is not None else {"height": 256, "width": 256}
_snake_case : str = get_size_dict(lowercase_ , param_name="crop_size" )
_snake_case : List[Any] = do_resize
_snake_case : Tuple = size
_snake_case : Union[str, Any] = resample
_snake_case : str = do_rescale
_snake_case : Dict = rescale_factor
_snake_case : int = do_center_crop
_snake_case : int = crop_size
_snake_case : List[Any] = do_flip_channel_order
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PIL.Image.BILINEAR , lowercase_ = None , **lowercase_ , ):
_snake_case : Optional[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
_snake_case : int = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
_snake_case : List[str] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
return flip_channel_order(lowercase_ , data_format=lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
_snake_case : Dict = do_resize if do_resize is not None else self.do_resize
_snake_case : Union[str, Any] = resample if resample is not None else self.resample
_snake_case : Dict = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case : List[str] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_snake_case : Optional[int] = size if size is not None else self.size
_snake_case : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
_snake_case : Optional[int] = crop_size if crop_size is not None else self.crop_size
_snake_case : Union[str, Any] = get_size_dict(lowercase_ , param_name="crop_size" )
_snake_case : Tuple = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_snake_case : Tuple = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
_snake_case : int = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
_snake_case : List[Any] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
_snake_case : Optional[int] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_snake_case : int = [self.flip_channel_order(image=lowercase_ ) for image in images]
_snake_case : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
_snake_case : List[Any] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowercase_ ):
_snake_case : Union[str, Any] = target_sizes.numpy()
_snake_case : int = []
for idx in range(len(lowercase_ ) ):
_snake_case : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_ )
_snake_case : List[str] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
_snake_case : List[Any] = logits.argmax(dim=1 )
_snake_case : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 284 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = True , **lowercase_ , ):
super().__init__(**lowercase_ )
_snake_case : Dict = size if size is not None else {"shortest_edge": 224}
_snake_case : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
_snake_case : Any = crop_size if crop_size is not None else {"height": 256, "width": 256}
_snake_case : str = get_size_dict(lowercase_ , param_name="crop_size" )
_snake_case : List[Any] = do_resize
_snake_case : Tuple = size
_snake_case : Union[str, Any] = resample
_snake_case : str = do_rescale
_snake_case : Dict = rescale_factor
_snake_case : int = do_center_crop
_snake_case : int = crop_size
_snake_case : List[Any] = do_flip_channel_order
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PIL.Image.BILINEAR , lowercase_ = None , **lowercase_ , ):
_snake_case : Optional[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
_snake_case : int = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
_snake_case : List[str] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
return flip_channel_order(lowercase_ , data_format=lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
_snake_case : Dict = do_resize if do_resize is not None else self.do_resize
_snake_case : Union[str, Any] = resample if resample is not None else self.resample
_snake_case : Dict = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : str = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case : List[str] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_snake_case : Optional[int] = size if size is not None else self.size
_snake_case : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ )
_snake_case : Optional[int] = crop_size if crop_size is not None else self.crop_size
_snake_case : Union[str, Any] = get_size_dict(lowercase_ , param_name="crop_size" )
_snake_case : Tuple = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_snake_case : Tuple = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
_snake_case : int = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
_snake_case : List[Any] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
_snake_case : Optional[int] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_snake_case : int = [self.flip_channel_order(image=lowercase_ ) for image in images]
_snake_case : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
_snake_case : List[Any] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None ):
_snake_case : int = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowercase_ ):
_snake_case : Union[str, Any] = target_sizes.numpy()
_snake_case : int = []
for idx in range(len(lowercase_ ) ):
_snake_case : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_ )
_snake_case : List[str] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
_snake_case : List[Any] = logits.argmax(dim=1 )
_snake_case : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 284 | 1 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
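# Example: cocktail_shaker_sort([4, 5, 2, 1, 2]) returns [1, 2, 2, 4, 5]; the early
# `break` makes a single O(n) pass suffice when the input is already sorted.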
| 62 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = 'RegNetConfig'
# Base docstring
__a = 'facebook/regnet-y-040'
__a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__a = 'facebook/regnet-y-040'
__a = 'tabby, tabby cat'
__a = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
lowercase_ = ACTaFN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.ConvaD(
filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
lowercase_ = [
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
lowercase_ = ACTaFN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class lowercase__( tf.keras.layers.Layer ):
"""simple docstring"""
a :str = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
lowercase_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
        # Change to NCHW output format to have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Tuple = RegNetConfig
a :Any = 'regnet'
a :List[str] = 'pixel_values'
@property
def _lowercase ( self : List[str] ) -> str:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
__a = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase , )
class lowercase__( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_labels
lowercase_ = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name='''regnet''' )
# classification head
lowercase_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
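# Note on the NHWC/NCHW transposes above (a summary, not part of the original file):
# tf.keras Conv2D layers only support channels-last (NHWC) on CPU, so inputs are
# converted NCHW -> NHWC inside the embedder and every returned hidden state is
# transposed back to NCHW to stay aligned with the PyTorch RegNet outputs, e.g.:
#
#     x = tf.transpose(pixel_values, perm=(0, 2, 3, 1))    # NCHW -> NHWC
#     out = tf.transpose(hidden_state, perm=(0, 3, 1, 2))  # NHWC -> NCHW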
| 30 | 0 |
'''simple docstring'''
import math
def res(x: int, y: int):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
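# Worked example: res(2, 10) == 10 * math.log10(2) ≈ 3.0103 == log10(2**10), so
# comparing these base-10 logs orders the original powers without materializing them.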
| 294 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
if __name__ == "__main__":
import doctest
doctest.testmod()
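# Example (using the names restored above):
#     is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))  # True
#     is_binary_search_tree(TreeNode(2.0, TreeNode(3.0)))  # False: left child > parent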
| 294 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar('_T')


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"""Queue({tuple(self._stack2[::-1] + self._stack1)})"""

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("""Queue is empty""")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
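# Example (amortized O(1) per operation):
#     q = QueueByTwoStacks([1, 2, 3])
#     q.put(4)
#     q.get()  # -> 1: FIFO order, even though the storage is two LIFO stacks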
| 294 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
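# Example (a sketch; assumes the restored shim above): instantiating still works but
# emits a FutureWarning:
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         CLIPFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)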
| 294 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'{max_subarray_sum(nums) = }')
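# The allow_empty_subarrays flag only matters for all-negative input:
#     max_subarray_sum([-3, -1])                              # -> -1
#     max_subarray_sum([-3, -1], allow_empty_subarrays=True)  # -> 0 (empty subarray wins)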
| 366 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory)
return dataset
class _A :
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dataset , __SCREAMING_SNAKE_CASE : Union[PathLike, BinaryIO] , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.')
__a = dataset
__a = path_or_buf
__a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__a = num_proc
__a = '''utf-8'''
__a = to_json_kwargs
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.to_json_kwargs.pop('''path_or_buf''' , __SCREAMING_SNAKE_CASE)
__a = self.to_json_kwargs.pop('''orient''' , '''records''')
__a = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False)
__a = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True)
__a = self.to_json_kwargs.pop('''compression''' , __SCREAMING_SNAKE_CASE)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression')
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf , '''wb''' , compression=__SCREAMING_SNAKE_CASE) as buffer:
__a = self._write(file_obj=__SCREAMING_SNAKE_CASE , orient=__SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
''' was passed. Please provide a local path instead.''')
__a = self._write(
file_obj=self.path_or_buf , orient=__SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , **self.to_json_kwargs)
return written
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a , __a , __a , __a , __a = args
__a = query_table(
table=self.dataset.data , key=slice(__SCREAMING_SNAKE_CASE , offset + self.batch_size) , indices=self.dataset._indices , )
__a = batch.to_pandas().to_json(
path_or_buf=__SCREAMING_SNAKE_CASE , orient=__SCREAMING_SNAKE_CASE , lines=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if not json_str.endswith('''\n'''):
json_str += "\n"
return json_str.encode(self.encoding)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : BinaryIO , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
__a = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
__a = self._batch_json((offset, orient, lines, index, to_json_kwargs))
written += file_obj.write(__SCREAMING_SNAKE_CASE)
else:
__a , __a = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(__SCREAMING_SNAKE_CASE)
return written
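# Hedged usage sketch (public `datasets` API; file names illustrative): classes
# like the two above back Dataset.to_json / load_dataset("json", ...), e.g.
#
#   from datasets import Dataset, load_dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ds.to_json("out.jsonl")                              # JSON Lines via the writer path
#   ds2 = load_dataset("json", data_files="out.jsonl")   # read back via the reader path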
| 131 | 0 |
'''simple docstring'''
import os


def solution():
    """Project Euler 11: greatest product of four adjacent numbers
    (right, down, or diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1 (down-right)
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2 (down-left)
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
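# Hedged alternative sketch (not part of the original file): the four scans in
# `solution` can be collapsed into a single loop over direction vectors. The
# function and parameter names below are illustrative, not from the source.
def solution_directions(grid, run=4):
    n = len(grid)
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, two diagonals
        for i in range(n):
            for j in range(n):
                # Only start a run whose last cell stays inside the grid.
                if 0 <= i + (run - 1) * di < n and 0 <= j + (run - 1) * dj < n:
                    product = 1
                    for k in range(run):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best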
if __name__ == "__main__":
print(solution())
| 346 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
main()
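# Hedged usage note (not part of the original script): this is meant to run
# unattended, e.g. from a scheduled CI job, with a token in the environment:
#   $ GITHUB_TOKEN=<personal-access-token> python stale.py
# (the filename `stale.py` is illustrative).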
| 71 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
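    # Hedged usage sketch (public transformers API; values illustrative): a
    # DisjunctiveConstraint is normally passed to constrained beam search:
    #
    #   word_ids = tokenizer(["rain", "raining"], add_special_tokens=False).input_ids
    #   constraint = DisjunctiveConstraint(word_ids)
    #   out = model.generate(**inputs, constraints=[constraint], num_beams=4)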
| 106 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 106 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of binary search trees on `node_count` nodes (the Catalan number)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of (labeled) binary trees on `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
f"binary trees and {catalan_number(node_count)} binary search trees."
)
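# Hedged worked example (not part of the original file):
# >>> binomial_coefficient(6, 3)
# 20
# >>> catalan_number(3)     # 20 // (3 + 1)
# 5
# >>> binary_tree_count(3)  # 5 * 3! labeled binary trees
# 30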
| 71 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
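# Hedged note: tests like these are collected by pytest in the transformers
# test suite, e.g. `pytest tests/models/instructblip -k processor`
# (path and -k filter are illustrative, not taken from the original file).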
| 180 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 109 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
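# Hedged worked example (not part of the original file): `color_quantize` maps
# each RGB pixel to the index of its nearest palette cluster by squared
# euclidean distance, e.g.
#
# >>> clusters = np.array([[0, 0, 0], [255, 255, 255]])
# >>> image = np.array([[[10, 10, 10], [250, 240, 255]]])  # one 1x2 RGB image
# >>> color_quantize(image, clusters)
# array([0, 1])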
| 109 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control
    characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Constructs a Longformer tokenizer (byte-level BPE, derived from the GPT-2/RoBERTa tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
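# Hedged usage sketch (checkpoint name taken from the pretrained maps above;
# output ids are not shown since they depend on the vocabulary files):
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tokenizer("Hello world")["input_ids"]   # RoBERTa-style byte-level BPE ids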
| 13 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
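# Hedged usage note (not part of the original file): this questionnaire is
# typically reached through the CLI, e.g.
#   $ pip install "accelerate[sagemaker]"   # extra name is an assumption
#   $ accelerate config                     # then pick Amazon SageMaker as the compute environment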
| 47 | 0 |
import os

import pytest
from attr import dataclass


DEFAULT_REGION = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5_500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1_000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
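# Hedged usage sketch (class and test names illustrative): the fixture attaches
# the environment to the requesting test class, so tests declare it via
# usefixtures:
#
#   @pytest.mark.usefixtures("sm_env")
#   class SingleNodeTest(unittest.TestCase):
#       framework = "pytorch"
#
#       def test_job_name(self):
#           assert self.env.base_job_name.startswith("pytorch")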
| 352 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
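# Hedged illustration (not part of the original file): with the _LazyModule
# hook installed, heavy submodules are imported only on first attribute access:
#   from transformers import RobertaModel      # resolved lazily through this module
#   model = RobertaModel.from_pretrained("roberta-base")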
| 176 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # the hook name used in newer pytorch-lightning versions
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
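

# Wiring sketch (added; `MyTaskModel` is hypothetical, not in the original file):
# how a task script would compose the argument helpers with generic_train.
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# parser = MyTaskModel.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# trainer = generic_train(MyTaskModel(args), args)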
| 133 |
"""
Project Euler Problem 115: https://projecteuler.net/problem=115

Find the least value of n for which the fill-count function first exceeds one
million: rows of length n are filled with blocks of length at least
`min_block_length`, any two blocks separated by at least one empty cell.
"""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(F'''{solution() = }''')
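
# Cross-check (added sketch, not in the original): Project Euler 115 reports
# F(3, 29) = 673135 and F(3, 30) = 1089155, so the same search with
# min_block_length=3 should return 30.
if __name__ == "__main__":
    assert solution(min_block_length=3) == 30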
| 265 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
| 29 |
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    """
    Count the hollow square laminae that can be formed using up to `limit`
    tiles (Project Euler 173).
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
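
# Brute-force cross-check (added sketch, not in the original): enumerate the
# laminae directly for a small tile budget and compare with the closed-form
# count above.
def _brute_force_laminae(limit: int) -> int:
    count = 0
    for outer in range(3, limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # hole keeps the outer square's parity
            if outer * outer - hole * hole > limit:
                break
            count += 1
    return count


if __name__ == "__main__":
    assert _brute_force_laminae(100) == solution(100)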
| 29 | 1 |
"""simple docstring"""
def lowercase (snake_case__ : list ) -> list:
'''simple docstring'''
if len(snake_case__ ) <= 1:
return [tuple(snake_case__ )]
lowerCAmelCase = []
def generate(snake_case__ : int , snake_case__ : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , snake_case__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowerCAmelCase , lowerCAmelCase = arr[k - 1], arr[i]
else: # k is odd
lowerCAmelCase , lowerCAmelCase = arr[k - 1], arr[0]
generate(k - 1 , snake_case__ )
generate(len(snake_case__ ) , snake_case__ )
return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
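
# Sanity check (added sketch, not in the original): Heap's algorithm must
# enumerate exactly the permutations produced by itertools.permutations.
if __name__ == "__main__":
    from itertools import permutations

    assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))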
| 155 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
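
# Usage sketch (added; an assumption, not part of the original file): a dataset
# with an Image column and a ClassLabel "labels" column can be cast through
# this template, e.g.
#   ds = load_dataset("mnist", split="train")
#   ds = ds.prepare_for_task("image-classification")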
| 155 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
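
# Usage sketch (added, not in the original file): the class defaults above
# yield a BERT-base-sized configuration.
if __name__ == "__main__":
    _cfg = XLMRobertaConfig()
    print(_cfg.vocab_size, _cfg.num_hidden_layers)  # 30522 12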
| 362 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"

_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"

_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric(\"code_eval\")\n    >>> test_cases = [\"assert add(2,3)==5\"]\n    >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {'pass@1': 0.5, 'pass@2': 1.0}\n"

_WARNING = "\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"

_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
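
# Worked example (added, not in the original): with n = 5 samples of which
# c = 2 pass, pass@1 = 1 - C(3, 1) / C(5, 1) = 0.4.
if __name__ == "__main__":
    print(estimate_pass_at_k(np.array([5]), np.array([2]), 1))  # [0.4]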
| 71 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 284 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. "
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
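
# Usage note (added, not part of the original script): run through the
# accelerate CLI, e.g.
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2
# The script file name here is assumed from the accelerate examples layout.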
| 102 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 294 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 1 |
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def __a ( _UpperCamelCase: dict[int, list[int]] , _UpperCamelCase: int , _UpperCamelCase: list[bool] ) -> list[int]:
"""simple docstring"""
_snake_case = True
_snake_case = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
order.append(_UpperCamelCase )
return order
def __a ( _UpperCamelCase: dict[int, list[int]] , _UpperCamelCase: int , _UpperCamelCase: list[bool] ) -> list[int]:
"""simple docstring"""
_snake_case = True
_snake_case = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return component
def __a ( _UpperCamelCase: dict[int, list[int]] ) -> list[list[int]]:
"""simple docstring"""
_snake_case = len(_UpperCamelCase ) * [False]
_snake_case = {vert: [] for vert in range(len(_UpperCamelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_UpperCamelCase )
_snake_case = []
for i, was_visited in enumerate(_UpperCamelCase ):
if not was_visited:
order += topology_sort(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
_snake_case = []
_snake_case = len(_UpperCamelCase ) * [False]
for i in range(len(_UpperCamelCase ) ):
_snake_case = order[len(_UpperCamelCase ) - i - 1]
if not visited[vert]:
_snake_case = find_components(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
components_list.append(_UpperCamelCase )
return components_list
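
# A quick self-check of the Kosaraju implementation above: in test_graph_1 the
# cycle 0 -> 2 -> 1 -> 0 forms one component, while 3 and 4 are singletons.
assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]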
| 350 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of ``sequence`` via a state-space tree."""
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """At each index, branch on excluding and then including ``sequence[index]``."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
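    # Tracing the exclude-first recursion above, ['A', 'B', 'C'] prints its
    # 8 subsequences in this order: [], ['C'], ['B'], ['B', 'C'], ['A'],
    # ['A', 'C'], ['A', 'B'], ['A', 'B', 'C'].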
| 142 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
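# Note: the TYPE_CHECKING branch above never executes at runtime; it exists so
# that static type checkers and IDEs can resolve the real symbols, while
# runtime users get the lazy proxy module installed by the else branch.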
| 35 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 131 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """tqdm wrapper that, by default, only renders the bar on the local main process."""
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
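# Usage sketch (assumes an initialized Accelerate process group): with the
# default main_process_only=True, every rank other than local rank 0 passes
# disable=True to tqdm, so a multi-GPU run prints a single progress bar:
# for batch in tqdm(dataloader, desc="training"):
#     ...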
| 183 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Pipeline that runs the same prompt through the four v1.x Stable Diffusion checkpoints."""

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ) -> None:
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting slice_size to `None` disables attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
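# Usage note: calling the pipeline above returns a StableDiffusionPipelineOutput
# whose `images` list holds one image per checkpoint (v1.1 through v1.4), which
# makes side-by-side comparison of the four releases straightforward.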
| 183 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below ``n``."""
    result = 0
    a = 3  # 1 and 2 are not multiples of 3 or 5, so start at 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
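    # With the default n = 1000 this prints solution() = 233168, the well-known
    # answer to Project Euler problem 1.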
| 106 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')


class LRUCache(Generic[T]):
    """Least-recently-used cache backed by a deque (recency order) and a set (membership)."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
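    # Walkthrough of the sequence above: re-referring 'A' moves it to the front
    # instead of re-inserting it, and referring 5 at full capacity (4) evicts
    # the least recently used key, 2 -- hence the final order [5, 4, 'A', 3].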
| 106 | 1 |
"""simple docstring"""
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """Two-pointer check: walk inward from both ends."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Compare each character with its mirror from the other end."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Strip matching end characters and recurse on the middle."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Compare the string against its reverse slice."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"""all({name}(key) is value for key, value in test_data.items())"""
    setup = f"""from __main__ import test_data, {name}"""
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 230 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 230 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs are hard to combine with byte-level BPE (mostly an
        # issue of adding a space before the string), so the common test is skipped.
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token='<pad>')
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding='max_length', max_length=30, return_tensors='np')
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors='np')
        out_p = tokenizer(*p, padding='max_length', max_length=60, return_tensors='np')
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors='np')
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s['input_ids'])
        self.assertTrue(0 in out_s['attention_mask'])
        # s2
        # test automatic padding
        self.assertEqual(out_s2['input_ids'].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2['input_ids'][0])
        self.assertFalse(0 in out_s2['attention_mask'][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2['input_ids'][1])
        self.assertTrue(0 in out_s2['attention_mask'][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p['input_ids'])
        self.assertTrue(0 in out_p['attention_mask'])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2['input_ids'].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2['input_ids'][0])
        self.assertFalse(0 in out_p2['attention_mask'][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2['input_ids'][1])
        self.assertTrue(0 in out_p2['attention_mask'][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape('<|endoftext|>'), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
| 212 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""")
| 108 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs, )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs, ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 353 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 144 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(self, feature_size=80, sampling_rate=1_60_00, hop_length=1_60, chunk_length=30, n_fft=4_00, padding_value=0.0, return_attention_mask=False, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=80_00.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
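# Shape sanity check for the defaults above: a 30 s clip at 16 kHz gives
# 30 * 16000 = 480,000 samples, and with hop_length=160 that is 3,000 frames
# of feature_size=80 log-mel bins per example.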
| 87 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = """Wav2Vec2FeatureExtractor"""
    tokenizer_class = """AutoTokenizer"""

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                F'Loading a tokenizer inside {cls.__name__} from a config that does not'
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ', FutureWarning, )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 176 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.tokenizer.model_input_names
UpperCamelCase__ :str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 219 |
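# The `model_input_names` property above merges the two components' input name
# lists; `dict.fromkeys` is the standard order-preserving de-duplication idiom
# in Python 3.7+ (illustrative names below, not read from the real classes):
tokenizer_names = ['input_ids', 'attention_mask']
image_processor_names = ['pixel_values', 'pixel_mask', 'attention_mask']
print(list(dict.fromkeys(tokenizer_names + image_processor_names)))
# ['input_ids', 'attention_mask', 'pixel_values', 'pixel_mask']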
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at ``(row, column)`` without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or ``None`` if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
            print('''Cannot find a solution.''')
| 219 | 1 |
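# A worked example of the 3x3 box arithmetic used by `is_safe` above:
# `row - row % 3` snaps an index to the top-left corner of its box, so the two
# nested `range(3)` loops visit exactly the nine cells that share the box.
row, column = 7, 4
box_cells = [
    ((row - row % 3) + i, (column - column % 3) + j) for i in range(3) for j in range(3)
]
print(box_cells)
# [(6, 3), (6, 4), (6, 5), (7, 3), (7, 4), (7, 5), (8, 3), (8, 4), (8, 5)]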
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''gpt_neox_japanese'''
    def __init__( self , vocab_size=32000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1E-5 , use_cache=True , bos_token_id=31996 , eos_token_id=31999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 29 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''efficientformer'''
    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [48, 96, 224, 448] , downsamples = [True, True, True, True] , dim = 448 , key_dim = 32 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 16 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_meta3d_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1E-5 , hidden_act = "gelu" , initializer_range = 0.02 , layer_norm_eps = 1E-12 , image_size = 224 , batch_norm_eps = 1E-05 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 29 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
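# A quick numeric sanity check of the cosine schedule above (a sketch using the
# default alpha_transform_type="cosine" and max_beta=0.999): each beta is the
# relative drop in alpha_bar over one timestep, so betas grow toward max_beta.
def _demo_alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

_demo_steps = 4
_demo_betas = [
    min(1 - _demo_alpha_bar((i + 1) / _demo_steps) / _demo_alpha_bar(i / _demo_steps), 0.999)
    for i in range(_demo_steps)
]
print([round(b, 3) for b in _demo_betas])  # roughly [0.153, 0.417, 0.708, 0.999]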
class KDPM2DiscreteScheduler(SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.00085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , timestep_spacing = "linspace" , steps_offset = 0 , ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
    def index_for_timestep( self , timestep , schedule_timesteps=None ):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample , timestep ) -> torch.FloatTensor:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(device ).startswith("mps" ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device , dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device , dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def sigma_to_t( self , sigma ):
        '''simple docstring'''
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
@property
    def state_in_first_order( self ):
'''simple docstring'''
return self.sample is None
    def step( self , model_output , timestep , sample , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ) -> List[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 367 |
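# A minimal sketch of the broadcasting idiom used in `add_noise` above: the
# per-sample sigma vector is unsqueezed until it broadcasts against a
# (batch, channels, height, width) sample tensor.
import torch

samples = torch.randn(2, 3, 8, 8)
noise = torch.randn_like(samples)
sigma = torch.tensor([0.5, 2.0])  # one sigma per batch element
while len(sigma.shape) < len(samples.shape):
    sigma = sigma.unsqueeze(-1)  # final shape: (2, 1, 1, 1)
noisy = samples + noise * sigma
print(sigma.shape, noisy.shape)  # torch.Size([2, 1, 1, 1]) torch.Size([2, 3, 8, 8])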
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given length that extend the partial digit choice."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length, )
    return result
def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers over every length up to ``max_power`` digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 148 | 0 |
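# A brute-force cross-check of the combinatorial count above (a sketch; the
# Project Euler 145 statement says there are exactly 120 reversible numbers
# below one thousand, which should match solution(3)):
def _is_reversible(n: int) -> bool:
    if n % 10 == 0:  # the reversed number would have a leading zero
        return False
    return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))

print(sum(_is_reversible(n) for n in range(1, 1000)))  # 120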
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCAmelCase_ : Union[str, Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
UpperCAmelCase_ : List[Any] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
UpperCAmelCase_ : Any = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 32 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good predictions are given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric ):
    """simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        """simple docstring"""
        if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result) )
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples , num_correct , k ) -> np.ndarray:
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int , c: int , k: int ) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
    if isinstance(num_samples ,int ):
        num_samples_it = itertools.repeat(num_samples ,len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) ,int(c ) ,k ) for n, c in zip(num_samples_it ,num_correct )] )
| 71 | 0 |
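# A worked check of the unbiased pass@k estimator above: with n=5 samples of
# which c=2 pass, pass@2 = 1 - C(n-c, 2)/C(n, 2) = 1 - 3/10 = 0.7.
import numpy as np

def _pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

print(round(float(_pass_at_k(5, 2, 2)), 3))  # 0.7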
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 27}
class MgpstrTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        return len(self.vocab )
    def get_vocab( self ):
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        return (vocab_file,)
| 342 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 ) ) + list(range(ord("¡" ), ord("¬" ) + 1 ) ) + list(range(ord("®" ), ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )
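# Quick check of the byte-to-unicode table above: all 256 byte values get a
# printable character, e.g. the space byte maps to 'Ġ', which is why GPT-2
# style BPE vocabularies are full of 'Ġ'-prefixed tokens.
_byte_table = bytes_to_unicode()
print(len(_byte_table), _byte_table[ord(' ')])  # 256 Ġ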
def get_pairs(word ) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
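# Illustration of `get_pairs` above: the merge loop in `bpe` repeatedly picks
# the lowest-ranked pair from this set and merges it into one symbol.
print(get_pairs(('h', 'e', 'l', 'l', 'o')))
# e.g. {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} (set order may vary)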
class BlenderbotTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: Tuple , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]="replace" , __lowerCamelCase: List[str]="<s>" , __lowerCamelCase: List[str]="</s>" , __lowerCamelCase: str="</s>" , __lowerCamelCase: Tuple="<s>" , __lowerCamelCase: Optional[int]="<unk>" , __lowerCamelCase: Any="<pad>" , __lowerCamelCase: List[str]="<mask>" , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int , ) -> List[str]:
__UpperCAmelCase : int = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
__UpperCAmelCase : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
__UpperCAmelCase : Tuple = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
__UpperCAmelCase : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
__UpperCAmelCase : List[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : List[Any] = json.load(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Dict = errors # how to handle errors in decoding
__UpperCAmelCase : Optional[int] = bytes_to_unicode()
__UpperCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : int = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : int = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCamelCase ( self: Dict ) -> Any:
return len(self.encoder )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : Dict = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
__UpperCAmelCase : Optional[int] = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = bigram
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = 0
while i < len(__lowerCamelCase ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : List[Any] = tuple(__lowerCamelCase )
__UpperCAmelCase : str = new_word
if len(__lowerCamelCase ) == 1:
break
else:
__UpperCAmelCase : Optional[Any] = get_pairs(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = word
return word
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Optional[Any] ) -> Dict:
__UpperCAmelCase : Any = []
for token in re.findall(self.pat , __lowerCamelCase ):
__UpperCAmelCase : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _lowerCamelCase ( self: int , __lowerCamelCase: str ) -> Dict:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[Any] ) -> List[str]:
return self.decoder.get(__lowerCamelCase )
def _lowerCamelCase ( self: Any , __lowerCamelCase: Any ) -> int:
__UpperCAmelCase : Dict = "".join(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: str , __lowerCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
__UpperCAmelCase : Optional[Any] = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : Optional[Any] = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None , __lowerCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _lowerCamelCase ( self: Tuple , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self: str , __lowerCamelCase: Optional[int] , __lowerCamelCase: List[str]=False , **__lowerCamelCase: int ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Optional[Any] = " " + text
return (text, kwargs)
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: List[int] , __lowerCamelCase: Optional[List[int]] = None ) -> List[str]:
return token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: "Conversation" ) -> List[int]:
__UpperCAmelCase : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = " ".join(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
__UpperCAmelCase : List[Any] = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 342 | 1 |
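# A minimal sketch of the merge loop inside `bpe` above, using a hypothetical
# two-rule merge table rather than the real Blenderbot merges file:
ranks = {('l', 'l'): 0, ('ll', 'o'): 1}
word = ('h', 'e', 'l', 'l', 'o')
while True:
    candidates = [(ranks[p], p) for p in zip(word, word[1:]) if p in ranks]
    if not candidates:
        break
    _, (first, second) = min(candidates)  # lowest rank merges first
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)  # ('h', 'e', 'llo')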
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self ):
        '''simple docstring'''
        return self._get_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 102 |
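# A standalone sketch of the seeding pattern used by `get_dummy_inputs` above:
# CPU/CUDA reproducibility comes from a device-bound Generator, while MPS
# historically fell back to the global torch.manual_seed.
import torch

def make_generator(device: str, seed: int = 0):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator('cpu', seed=0)
print(torch.rand(2, generator=gen))  # identical values on every run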
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path: str , openai_config_file: str , pytorch_dump_folder_path: str ) -> None:
    """Build the config, load the TF weights, and save a PyTorch checkpoint."""
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 102 | 1 |
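# Example invocation of the conversion script above (hypothetical script name
# and paths; the checkpoint folder must contain the original TF/numpy weights):
#
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch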
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowercase( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self, list1, list2, tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ), len(list2 ) )
        for a, b in zip(list1, list2 ):
            self.assertAlmostEqual(a, b, delta=tol )
    def testGradientAccumulator( self ):
'''simple docstring'''
_snake_case : Optional[int] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_A ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step, 3 )
self.assertEqual(len(accumulator.gradients ), 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1E-2 )
    def testGradientAccumulatorDistributionStrategy( self ):
'''simple docstring'''
_snake_case : Any = None
ops.enable_eager_execution_internal()
_snake_case : Tuple = tf.config.list_physical_devices("""CPU""" )
if len(_A ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_snake_case : Union[str, Any] = tf.config.list_logical_devices(device_type="""CPU""" )
_snake_case : Dict = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_snake_case : Dict = GradientAccumulator()
_snake_case : Any = tf.Variable([4.0, 3.0] )
_snake_case : Dict = create_optimizer(5E-5, 10, 5 )
_snake_case : Optional[int] = tf.Variable([0.0, 0.0], trainable=_A )
def accumulate_on_replica(a_: Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) )
@tf.function
def accumulate(a_: Optional[Any], a_: Union[str, Any] ):
with strategy.scope():
_snake_case : Tuple = strategy.experimental_local_results(_A )
local_variables[0].assign(_A )
local_variables[1].assign(_A )
strategy.run(_A, args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_A )
def _check_local_values(a_: Tuple, a_: List[Any] ):
_snake_case : Optional[Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value(), _A, tol=1E-2 )
self.assertListAlmostEqual(values[1].value(), _A, tol=1E-2 )
accumulate([1.0, 2.0], [-1.0, 1.0] )
accumulate([3.0, -1.0], [-1.0, -1.0] )
accumulate([-2.0, 2.0], [3.0, -2.0] )
self.assertEqual(accumulator.step, 3 )
_check_local_values([2.0, 3.0], [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
_check_local_values([0.0, 0.0], [0.0, 0.0] )
| 357 |
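# A framework-free sketch of the accumulation contract the test above checks:
# per-micro-batch gradients are summed elementwise, and only `reset()` clears
# the running total and the step count.
class _ToyAccumulator:
    def __init__(self):
        self.gradients, self.step = [0.0, 0.0], 0

    def __call__(self, grads):
        self.gradients = [g + new for g, new in zip(self.gradients, grads)]
        self.step += 1

    def reset(self):
        self.gradients, self.step = [0.0, 0.0], 0

_acc = _ToyAccumulator()
for g in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    _acc(g)
print(_acc.step, _acc.gradients)  # 3 [-2.0, 5.0], matching the assertions above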
"""simple docstring"""
from __future__ import annotations
from math import gcd
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> int:
return (pow(snake_case__ , 2 ) + step) % modulus
for _ in range(snake_case__ ):
# These track the position within the cycle detection logic.
_snake_case : Optional[int] = seed
_snake_case : str = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
_snake_case : Any = rand_fn(snake_case__ , snake_case__ , snake_case__ )
_snake_case : Optional[Any] = rand_fn(snake_case__ , snake_case__ , snake_case__ )
_snake_case : int = rand_fn(snake_case__ , snake_case__ , snake_case__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
_snake_case : str = gcd(hare - tortoise , snake_case__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
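# Worked example (a quick self-check of the function above): 8051 = 83 * 97 is
# the classic demonstration input for Pollard's Rho, so with the default seed,
# step and attempt count the call should return one of the two prime factors.
assert pollard_rho(8051) in (83, 97)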
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 132 | 0 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self ):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()

    def stop(self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {'''time''': time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures['''cpu'''] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures ):
    # Time
    measures = {'''time''': time.time() - start_measures['''time''']}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures['''cpu'''] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
    measures['''cpu-peak'''] = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f'''{i}-peak'''] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20

    return measures


def log_measures(measures , description ):
    print(f'''{description}:''' )
    print(f'''- Time: {measures["time"]:.2f}s''' )
    for i in range(torch.cuda.device_count() ):
        print(f'''- GPU {i} allocated: {measures[str(i )]:.2f}MiB''' )
        peak = measures[f'''{i}-peak''']
        print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
    print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''' )
    print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''' )
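# Usage sketch (assumes the helpers above): bracket the workload with
# start_measure()/end_measure() and pretty-print the deltas; the GPU entries
# are only populated when CUDA devices are visible.
if __name__ == "__main__":
    start = start_measure()
    x = torch.randn(1024, 1024)
    y = x @ x
    measures = end_measure(start)
    log_measures(measures, "1024x1024 matmul")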
| 289 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject ):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self , *args , **kwargs ):
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def from_config(cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 142 | 0 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    # Secant method: repeatedly intersect the chord through the two latest
    # points with the x-axis until successive estimates agree to 1e-5.
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1 ) == function(x_n ):
            raise ZeroDivisionError('''float division by zero, could not find root''' )
        x_n2: float = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
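    # Sanity check (assumes the corrected names above): the real root of
    # x**3 - 2*x - 5 = 0 is approximately 2.0945514815, so the residual at the
    # returned point should be essentially zero.
    root = intersection(f, 3, 3.5)
    assert abs(f(root)) < 1e-3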
| 301 |
def solution(max_perimeter: int = 10**9 ) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 301 | 1 |
"""simple docstring"""
def solution(power: int = 1000 ) -> int:
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
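# Worked example (assumes the corrected names above): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26, so solution(15) must return 26.
assert solution(15) == 26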
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
| 183 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int ) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums


def validate(n: int ) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True


def compute_truncated_primes(count: int = 11 ) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11 ) )
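# Worked example (assumes the corrected names above): 3797 stays prime while
# truncating from either side (3797, 797, 97, 7 and 379, 37, 3), so it must be
# one of the eleven truncatable primes collected above.
assert 3797 in compute_truncated_primes(11)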
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
| 183 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = F"""{src_lang}-{tgt_lang}"""
    readme = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(F"""Generating {path}""" )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 354 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
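# With the lazy structure above, importing the package is cheap: each modeling
# backend is only imported on first attribute access. A usage sketch (assuming
# the usual transformers package layout; names as declared above):
#
#     from transformers import VisionEncoderDecoderModel   # resolves through the lazy module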
| 334 | 0 |
from importlib import import_module
from .logging import get_logger
A__ = get_logger(__name__)
class _PatchedModuleObj:
    def __init__(self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__''' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module


class patch_submodule:
    _active_patches = []

    def __init__(self , obj , target: str , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('''.''' )[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self ):
        *submodules, target_attr = self.target.split('''.''' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('''.'''.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('''.'''.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )

    def __exit__(self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )

    def start(self ):
        self.__enter__()
        self._active_patches.append(self )

    def stop(self ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
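# Usage sketch (hypothetical module under test; assumes the classes above):
# patch ``os.path.join`` as seen from another module, even if it was imported
# there as ``from os import path as ospath`` or ``from os.path import join``.
#
#     from mypkg import fs_utils                       # hypothetical module using os.path.join
#     mock_join = lambda *parts: "/".join(parts)
#     with patch_submodule(fs_utils, "os.path.join", mock_join):
#         fs_utils.build_path("a", "b")                # resolves through the mock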
| 230 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__ = logging.getLogger(__name__)
@dataclass
class a :
__lowerCAmelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__lowerCAmelCase : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__lowerCAmelCase : bool = field(default=__lowerCamelCase , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class a :
__lowerCAmelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__lowerCAmelCase : int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowerCAmelCase : bool = field(
default=__lowerCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def _lowerCAmelCase ( ) -> Dict:
"""simple docstring"""
snake_case__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
snake_case__ : int = import_module('''tasks''' )
try:
snake_case__ : Optional[int] = getattr(__lowerCAmelCase , model_args.task_type )
snake_case__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : List[str] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels )} , cache_dir=model_args.cache_dir , )
snake_case__ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
snake_case__ : Tuple = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case__ : Dict = (
TokenClassificationDataset(
token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case__ : Optional[Any] = (
TokenClassificationDataset(
token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def align_predictions(predictions: np.ndarray , label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions , axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]

        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )

        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list , preds_list ),
            "precision": precision_score(out_label_list , preds_list ),
            "recall": recall_score(out_label_list , preds_list ),
            "f1": f1_score(out_label_list , preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
snake_case__ : Tuple = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Any = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case__ : Tuple = trainer.evaluate()
snake_case__ : Optional[Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCAmelCase , __lowerCAmelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCAmelCase )
# Predict
if training_args.do_predict:
snake_case__ : Optional[Any] = TokenClassificationDataset(
token_classification_task=__lowerCAmelCase , data_dir=data_args.data_dir , tokenizer=__lowerCAmelCase , labels=__lowerCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
snake_case__ , snake_case__ , snake_case__ : List[str] = trainer.predict(__lowerCAmelCase )
snake_case__ , snake_case__ : int = align_predictions(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ : Optional[int] = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCAmelCase , __lowerCAmelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
snake_case__ : Any = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 230 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def run(self ):
        raise NotImplementedError()
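# Usage sketch (a hypothetical subclass; assumes the ABC above): a concrete
# command wires an argparse sub-parser in `register_subcommand` (which, by this
# CLI convention, receives the sub-parsers action) and does its work in `run`.
class EchoCommand(BaseTransformersCLICommand):
    def __init__(self, message):
        self.message = message

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo")
        echo_parser.add_argument("message")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def run(self):
        print(self.message)
| 360 |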
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Dict=32 * 8 , _lowerCAmelCase : List[str]=32 * 8 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Optional[Any]=64 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_size
SCREAMING_SNAKE_CASE_ = max_size
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = hidden_dim
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE_ = self.num_queries
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE_ = self.num_channels
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
return config
def lowerCAmelCase_ ( self : Optional[int] ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_layers )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = MaskaFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(_lowerCAmelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Any ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Any ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE_ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase_ ( self : List[str] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ : Tuple = 1E-4
def UpperCAmelCase_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[int] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase_ ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 210 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class MakeDuplicateClustersTest(TestCase ):
    """simple docstring"""

    def test_make_duplicate_clusters(self ):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def test_deduplicate_dataset(self ):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
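# Note on the numbers above: with a Jaccard threshold of 0.85, the token sets
# of "a " * 20 and "a " * 30 are identical (both reduce to {"a"}), so
# test_repo1 and test_repo2 form one duplicate cluster of size 2 while
# "b " * 7 stays outside it, and deduplication keeps 2 of the 3 files.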
| 324 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
A__ : Any = True
except (ImportError, ModuleNotFoundError):
A__ : str = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    x = re.sub("<n>" , "" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 144 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
        ]
    ]

    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ] , )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
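# Usage sketch (assumes the loader above plus a CUDA toolchain at runtime): the
# extension is JIT-compiled once by torch.utils.cpp_extension.load and cached;
# the returned module exposes the fused kernels, e.g. a hypothetical call:
#
#     MSDA = load_cuda_kernels()
#     out = MSDA.ms_deform_attn_forward(value, spatial_shapes, level_start_index,
#                                       sampling_locations, attention_weights, im2col_step)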
| 351 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
@dataclass(frozen=UpperCAmelCase__ )
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = 42
def __init__( self , _a , _a , _a , _a = None , _a=False , _a = False , ):
"""simple docstring"""
lowerCamelCase = hans_processors[task]()
lowerCamelCase = os.path.join(
_a , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_a ) , _a , ) , )
lowerCamelCase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase , lowerCamelCase = label_list[2], label_list[1]
lowerCamelCase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase = cached_features_file + """.lock"""
with FileLock(_a ):
if os.path.exists(_a ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
lowerCamelCase = torch.load(_a )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
lowerCamelCase = (
processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
)
logger.info("""Training examples: %s""" , len(_a ) )
lowerCamelCase = hans_convert_examples_to_features(_a , _a , _a , _a )
logger.info("""Saving features into cached file %s""" , _a )
torch.save(self.features , _a )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _a ):
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class __magic_name__ :
'''simple docstring'''
__UpperCamelCase = 42
        def __init__(self , data_dir , tokenizer , task , max_seq_length = 128 , overwrite_cache=False , evaluate = False , ):
            """simple docstring"""
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 10_000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        """example_id""": tf.int32,
                        """input_ids""": tf.int32,
                        """attention_mask""": tf.int32,
                        """token_type_ids""": tf.int32,
                    },
                    tf.int64,
                ) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _a ):
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.label_list
class HansProcessor(DataProcessor ):
    '''simple docstring'''

    def get_train_examples(self , data_dir ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , """heuristics_train_set.txt""" ) ) , """train""" )

    def get_dev_examples(self , data_dir ):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , """heuristics_evaluation_set.txt""" ) ) , """dev""" )

    def get_labels(self ):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self , lines , set_type ):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = """%s-%s""" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features(examples , label_list , max_length , tokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d""" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="""max_length""" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
hans_tasks_num_labels = {
    """hans""": 3,
}

hans_processors = {
    """hans""": HansProcessor,
}
| 168 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clap'''] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 219 |
def binary_xor(a: int , b: int ) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"

    max_len = max(len(a_binary ) , len(b_binary ) )

    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
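# Worked example (assumes the corrected function above): 25 = 0b11001 and
# 32 = 0b100000; XOR-ing the zero-padded bit strings gives 0b111001 (= 57).
assert binary_xor(25, 32) == "0b111001"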
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self ):
        resnets = []
        attentions = []

        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )
    def __call__(self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()

        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)

        return hidden_states, output_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = resnets
if self.add_downsample:
UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=True )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = ()
for resnet in self.resnets:
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase = self.downsamplers_a(lowerCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase = resnets
UpperCAmelCase = attentions
if self.add_upsample:
UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any]=True )-> Optional[int]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
UpperCAmelCase = res_hidden_states_tuple[-1]
UpperCAmelCase = res_hidden_states_tuple[:-1]
UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
if self.add_upsample:
UpperCAmelCase = self.upsamplers_a(lowerCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = resnets
if self.add_upsample:
UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=True )-> Tuple:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase = res_hidden_states_tuple[-1]
UpperCAmelCase = res_hidden_states_tuple[:-1]
UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
if self.add_upsample:
UpperCAmelCase = self.upsamplers_a(lowerCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
UpperCAmelCase = []
for _ in range(self.num_layers ):
UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = resnets
UpperCAmelCase = attentions
def __call__( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Any=True )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.resnets[0](lowerCAmelCase , lowerCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
return hidden_states
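# Minimal smoke-test sketch for one of the down-blocks above. The keyword
# names (in_channels, out_channels, num_layers) and the toy shapes are
# assumptions for illustration; in this dump every block class shares an
# obfuscated name, so the class is passed in rather than referenced directly.
import jax

def _smoke_test_down_block(block_cls):
    rng = jax.random.PRNGKey(0)
    sample = jnp.zeros((1, 8, 8, 32))  # NHWC layout, as Flax convolutions expect
    temb = jnp.zeros((1, 128))         # timestep embedding
    block = block_cls(in_channels=32, out_channels=32, num_layers=1)
    variables = block.init(rng, sample, temb)   # builds the resnet stack via setup
    return block.apply(variables, sample, temb)  # -> (hidden_states, skip_states)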
| 360 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCamelCase__:
def __init__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : int="resnet50" , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=True , lowerCAmelCase : str=True , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = out_indices if out_indices is not None else [4]
UpperCAmelCase = stage_names
UpperCAmelCase = out_features
UpperCAmelCase = backbone
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = use_pretrained_backbone
UpperCAmelCase = is_training
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def a__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Dict )-> Tuple:
"""simple docstring"""
UpperCAmelCase = TimmBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : List[str] = (TimmBackbone,) if is_torch_available() else ()
__magic_name__ : Any = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
__magic_name__ : Union[str, Any] = False
__magic_name__ : int = False
__magic_name__ : Tuple = False
__magic_name__ : List[str] = False
def a__( self : int )-> str:
"""simple docstring"""
UpperCAmelCase = TimmBackboneModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = '''resnet18'''
UpperCAmelCase = '''microsoft/resnet-18'''
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , use_timm_backbone=lowerCAmelCase )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , use_timm_backbone=lowerCAmelCase , out_indices=[1, 2, 3] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a__( self : Dict )-> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a__( self : str )-> str:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a__( self : Any )-> int:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a__( self : List[Any] )-> int:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a__( self : str )-> Tuple:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__( self : int )-> Tuple:
"""simple docstring"""
pass
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
UpperCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCAmelCase = self.all_model_classes[0]
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(**lowerCAmelCase )
UpperCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a__( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCAmelCase = copy.deepcopy(lowerCAmelCase )
UpperCAmelCase = None
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCAmelCase = copy.deepcopy(lowerCAmelCase )
UpperCAmelCase = False
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase )
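# Usage sketch mirroring the checks above: load a timm backbone through the
# Auto API and inspect the returned feature maps.
#
#     import torch
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained(
#         "resnet18", use_timm_backbone=True, out_indices=(1, 2, 3)
#     )
#     feats = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#     print([f.shape for f in feats])  # one map per requested stage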
| 91 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Any = {
"""camembert-base""": 512,
}
SCREAMING_SNAKE_CASE : List[Any] = """▁"""
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =CamembertTokenizer
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=["<s>NOTUSED", "</s>NOTUSED"] , **a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : str = vocab_file
__snake_case : int = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case : Any = [self.cls_token_id]
__snake_case : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : int = [self.sep_token_id]
__snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : List[str] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
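# Usage sketch: the pair encoding built above follows the RoBERTa/CamemBERT
# layout <s> A </s></s> B </s>. Using the library's public name for the
# obfuscated class:
#
#     from transformers import CamembertTokenizerFast
#
#     tok = CamembertTokenizerFast.from_pretrained("camembert-base")
#     ids = tok("Paris est belle.", "Il pleut.")["input_ids"]
#     print(tok.convert_ids_to_tokens(ids))  # shows the special-token frame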
| 102 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__A = "path-to-your-trained-model"
__A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__A = "A photo of sks dog in a bucket"
__A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
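# Determinism sketch: pinning the sampler's RNG makes the run reproducible
# (the generator argument is standard diffusers API).
generator = torch.Generator("cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seed0.png")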
| 148 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModel.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModel.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForPreTraining.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForCausalLM.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
UpperCAmelCase , UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase_ ,output_loading_info=lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
@slow
def _UpperCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
def _UpperCamelCase ( self ):
UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) ,14_410 )
UpperCAmelCase = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) ,14_410 )
def _UpperCamelCase ( self ):
UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_pt=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) ,14_410 )
UpperCAmelCase = AutoModelWithLMHead.from_pretrained(lowerCAmelCase_ ,from_tf=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ ,lowerCAmelCase_ )
self.assertEqual(model.num_parameters() ,14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_ ) ,14_410 )
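# The PT<->TF round-trip these tests exercise, reduced to a sketch:
#
#     from transformers import AutoModel, TFAutoModel
#
#     tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT weights -> TF
#     pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF weights -> PT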
| 365 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_snake_case , config=_snake_case )
UpperCAmelCase = downstream_dict["""projector.weight"""]
UpperCAmelCase = downstream_dict["""projector.bias"""]
UpperCAmelCase = downstream_dict["""model.post_net.linear.weight"""]
UpperCAmelCase = downstream_dict["""model.post_net.linear.bias"""]
return model
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_snake_case , config=_snake_case )
UpperCAmelCase = downstream_dict["""model.linear.weight"""]
UpperCAmelCase = downstream_dict["""model.linear.bias"""]
return model
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = WavaVecaForXVector.from_pretrained(_snake_case , config=_snake_case )
UpperCAmelCase = downstream_dict["""connector.weight"""]
UpperCAmelCase = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
UpperCAmelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
UpperCAmelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
UpperCAmelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
UpperCAmelCase = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
UpperCAmelCase = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = torch.load(_snake_case , map_location="""cpu""" )
UpperCAmelCase = checkpoint["""Downstream"""]
UpperCAmelCase = WavaVecaConfig.from_pretrained(_snake_case )
UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_snake_case , return_attention_mask=_snake_case , do_normalize=_snake_case )
UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
UpperCAmelCase = convert_classification(_snake_case , _snake_case , _snake_case )
elif arch.endswith("""ForAudioFrameClassification""" ):
UpperCAmelCase = convert_diarization(_snake_case , _snake_case , _snake_case )
elif arch.endswith("""ForXVector""" ):
UpperCAmelCase = convert_xvector(_snake_case , _snake_case , _snake_case )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
UpperCAmelCase = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(_snake_case )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_UpperCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
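# Example invocation (script name and paths are placeholders):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./classifier_config.json \
#         --checkpoint_path ./s3prl_downstream.ckpt \
#         --model_dump_path ./converted_model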
| 234 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
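# Sketch of the ONNX input spec declared above, using the library's public
# names for the obfuscated classes (assumed to correspond to this snippet):
#
#     from transformers import ImageGPTConfig
#     from transformers.models.imagegpt.configuration_imagegpt import ImageGPTOnnxConfig
#
#     onnx_cfg = ImageGPTOnnxConfig(ImageGPTConfig())
#     print(onnx_cfg.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'})])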
| 313 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
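# Sketch: building an I-BERT in integer-quantization mode through the config
# above (public class names assumed for the obfuscated ones):
#
#     from transformers import IBertConfig, IBertModel
#
#     model = IBertModel(IBertConfig(quant_mode=True))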
| 313 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
__UpperCamelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
__UpperCamelCase = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class UpperCamelCase ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = """whisper"""
SCREAMING_SNAKE_CASE_ = ["""past_key_values"""]
SCREAMING_SNAKE_CASE_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self, lowerCAmelCase__=5_1865, lowerCAmelCase__=80, lowerCAmelCase__=6, lowerCAmelCase__=4, lowerCAmelCase__=6, lowerCAmelCase__=4, lowerCAmelCase__=1536, lowerCAmelCase__=1536, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=5_0257, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__="gelu", lowerCAmelCase__=256, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=False, lowerCAmelCase__=1500, lowerCAmelCase__=448, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=None, lowerCAmelCase__=[220, 5_0256], lowerCAmelCase__=False, lowerCAmelCase__=256, lowerCAmelCase__=False, lowerCAmelCase__=0.05, lowerCAmelCase__=10, lowerCAmelCase__=2, lowerCAmelCase__=0.0, lowerCAmelCase__=10, lowerCAmelCase__=0, lowerCAmelCase__=7, **lowerCAmelCase__, ) -> List[str]:
snake_case_ = vocab_size
snake_case_ = num_mel_bins
snake_case_ = d_model
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_ffn_dim
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = use_cache
snake_case_ = encoder_layers
snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ = max_source_positions
snake_case_ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
snake_case_ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
snake_case_ = median_filter_width
super().__init__(
pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, is_encoder_decoder=lowerCAmelCase__, decoder_start_token_id=lowerCAmelCase__, suppress_tokens=lowerCAmelCase__, begin_suppress_tokens=lowerCAmelCase__, **lowerCAmelCase__, )
class UpperCamelCase ( __UpperCamelCase ):
@property
def a_ ( self) -> Dict:
snake_case_ = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
])
if self.use_past:
snake_case_ = {0: """batch"""}
else:
snake_case_ = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__, direction='inputs')
return common_inputs
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = -1, lowerCAmelCase__ = -1, lowerCAmelCase__ = False, lowerCAmelCase__ = None, lowerCAmelCase__ = 2_2050, lowerCAmelCase__ = 5.0, lowerCAmelCase__ = 220, ) -> Optional[Any]:
snake_case_ = OrderedDict()
snake_case_ = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=lowerCAmelCase__, framework=lowerCAmelCase__, sampling_rate=lowerCAmelCase__, time_duration=lowerCAmelCase__, frequency=lowerCAmelCase__, )
snake_case_ = encoder_inputs["""input_features"""].shape[2]
snake_case_ = encoder_sequence_length // 2 if self.use_past else seq_length
snake_case_ = super().generate_dummy_inputs(
preprocessor.tokenizer, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = encoder_inputs.pop('input_features')
snake_case_ = decoder_inputs.pop('decoder_input_ids')
if "past_key_values" in decoder_inputs:
snake_case_ = decoder_inputs.pop('past_key_values')
return dummy_inputs
@property
def a_ ( self) -> Tuple:
return 1e-3
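# Sketch: a toy-sized config exercising the fields above (public class names
# assumed for the obfuscated ones; sizes deliberately tiny):
#
#     from transformers import WhisperConfig, WhisperForConditionalGeneration
#
#     cfg = WhisperConfig(d_model=64, encoder_layers=2, decoder_layers=2,
#                         encoder_attention_heads=2, decoder_attention_heads=2,
#                         encoder_ffn_dim=128, decoder_ffn_dim=128)
#     model = WhisperForConditionalGeneration(cfg)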
| 356 |
"""simple docstring"""
import functools
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase ) == 0:
return 0
if min(UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
snake_case_ = set(UpperCAmelCase )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
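# Worked example: the def above is the classic "minimum cost for tickets" DP
# over (days, costs). For days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15]
# the minimum is 11 -- a 7-day pass bought on day 1 covers days 1-7 and
# single-day passes cover days 8 and 20.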
| 312 | 0 |
def _a ( SCREAMING_SNAKE_CASE_ : int ): # noqa: E741
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = 0
__lowerCAmelCase = [0] * n
__lowerCAmelCase = [False] * n
__lowerCAmelCase = [False] * n
def dfs(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ):
if parent == root:
out_edge_count += 1
__lowerCAmelCase = True
__lowerCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__lowerCAmelCase = dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__lowerCAmelCase = True
# AP found via cycle
if at == low[to]:
__lowerCAmelCase = True
else:
__lowerCAmelCase = min(low[at] , SCREAMING_SNAKE_CASE_ )
return out_edge_count
for i in range(SCREAMING_SNAKE_CASE_ ):
if not visited[i]:
__lowerCAmelCase = 0
__lowerCAmelCase = dfs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , -1 , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = out_edge_count > 1
for x in range(len(SCREAMING_SNAKE_CASE_ ) ):
if is_art[x] is True:
print(SCREAMING_SNAKE_CASE_ )
# Adjacency list of graph
UpperCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
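# Expected result for the sample graph above: the articulation points are
# 2, 3 and 5 -- removing 2 separates {0, 1} from the rest, removing 3
# isolates 4, and removing 5 cuts the 6-7-8 cycle off the graph.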
| 92 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a :Optional[Any] = logging.get_logger(__name__)
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , *_a , **_a ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
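# Migration sketch: downstream code should move to the image processor the
# warning points at, e.g.
#
#     from transformers import OwlViTImageProcessor
#
#     processor = OwlViTImageProcessor.from_pretrained("google/owlvit-base-patch32")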
| 132 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
a__ = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
a__ = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ) as f:
_a : Dict = json.loads(f.read() )
_a : Tuple = collections.OrderedDict()
_a : List[str] = collections.OrderedDict()
_a : Any = collections.OrderedDict()
with open(__a ,'''r''' ,encoding='''utf-8''' ) as f:
_a : List[str] = f.readlines()
_a : Dict = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
for idx, b in enumerate(__a ):
_a : List[str] = b
_a : Optional[Any] = idx
for wd in b:
_a : List[Any] = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__( self , _a , _a , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|startoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ) -> Optional[Any]:
super().__init__(
unk_token=_a , pad_token=_a , bos_token=_a , eos_token=_a , do_clean_text=_a , **_a , )
if not os.path.isfile(_a ):
raise ValueError(
F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
if not os.path.isfile(_a ):
raise ValueError(
F"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
_a : Dict = do_clean_text
_a , _a , _a , _a : Any = load_vocab_and_emoji(_a , _a )
_a : List[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def __lowercase ( self ) -> Any:
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def __lowercase ( self ) -> List[Any]:
return dict(self.raw_vocab , **self.added_tokens_encoder )
def __lowercase ( self , _a ) -> Tuple:
return self.subword_tokenizer.tokenize(_a , clean=self.do_clean_text )
def __lowercase ( self , _a ) -> Any:
return self.vocab.get(_a , self.vocab.get(self.unk_token ) )
def __lowercase ( self , _a ) -> Dict:
return self.subword_tokenizer.convert_id_to_token(_a )
def __lowercase ( self , _a ) -> Dict:
_a : Optional[int] = ''''''.join(_a ).strip()
return out_string
def __lowercase ( self , _a ) -> List[int]:
_a : Any = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
_a : Dict = input_ids[-self.model_max_length :]
return input_ids
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
_a : int = 0
if os.path.isdir(_a ):
_a : Any = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_a : Dict = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
else:
_a : str = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
_a : Optional[int] = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
_a : Tuple = token_index
writer.write(''','''.join(_a ) + '''\n''' )
index += 1
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
json.dump(self.emoji , _a )
return vocab_file, emoji_file
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> Optional[Any]:
_a : Tuple = vocab # same as swe
_a : str = ids_to_tokens # same as bpe
_a : str = emoji
_a : List[str] = np.max([len(_a ) for w in self.vocab.keys()] )
_a : int = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
_a : Any = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
_a : Union[str, Any] = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
_a : Any = re.compile(
R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_a : Union[str, Any] = re.compile(
R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
_a : int = re.compile(
R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
_a : Union[str, Any] = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
_a : Any = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
_a : str = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self ) -> Optional[Any]:
return len(self.ids_to_tokens )
def __lowercase ( self , _a ) -> int:
_a : Union[str, Any] = self.content_repattera.sub('''<URL>''' , _a )
_a : List[Any] = self.content_repattera.sub('''<EMAIL>''' , _a )
_a : Optional[Any] = self.content_repattera.sub('''<TEL>''' , _a )
_a : str = self.content_repattera.sub('''<DATE>''' , _a )
_a : Union[str, Any] = self.content_repattera.sub('''<DATE>''' , _a )
_a : Dict = self.content_repattera.sub('''<PRICE>''' , _a )
_a : Union[str, Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_a : Union[str, Any] = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
return content
def __lowercase ( self , _a , _a=False ) -> Dict:
_a : List[str] = text.replace(''' ''' , '''<SP>''' )
_a : Any = text.replace(''' ''' , '''<SP>''' )
_a : Any = text.replace('''\r\n''' , '''<BR>''' )
_a : int = text.replace('''\n''' , '''<BR>''' )
_a : Optional[int] = text.replace('''\r''' , '''<BR>''' )
_a : Union[str, Any] = text.replace('''\t''' , '''<TAB>''' )
_a : Optional[Any] = text.replace('''—''' , '''ー''' )
_a : Tuple = text.replace('''−''' , '''ー''' )
for k, v in self.emoji["emoji"].items():
if k in text:
_a : Union[str, Any] = text.replace(_a , _a )
if clean:
_a : int = self.clean_text(_a )
def check_simbol(_a ):
_a : str = x.encode()
if len(_a ) == 1 and len(_a ) == 2:
_a : int = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc_2_a_1 and c <= 0xc_2_b_f)
or (c >= 0xc_7_8_0 and c <= 0xc_7_8_3)
or (c >= 0xc_a_b_9 and c <= 0xc_b_b_f)
or (c >= 0xc_c_8_0 and c <= 0xc_d_a_2)
):
return True
return False
def checkuae(_a ):
_a : Any = x.encode()
if len(_a ) == 1 and len(_a ) == 3:
_a : List[Any] = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe_2_8_0_8_0 and c <= 0xe_2_b_0_7_f:
return True
return False
_a : str = 0
_a : Optional[Any] = []
while pos < len(_a ):
_a : int = min(len(_a ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
_a : List[Any] = [] # (token_id, token, pos)
for e in range(_a , _a , -1 ):
_a : Tuple = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_a ) > 2:
_a : List[Any] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_a ) > 0:
# the smallest token_id is adopted
_a , _a , _a : List[Any] = sorted(_a , key=lambda _a : x[0] )[0]
result.append(_a )
_a : Tuple = e
else:
_a : Optional[int] = pos + 1
_a : str = text[pos:end]
if check_simbol(_a ):
result.append('''<KIGOU>''' )
elif checkuae(_a ):
result.append('''<U2000U2BFF>''' )
else:
for i in wd.encode('''utf-8''' ):
result.append('''<|byte%d|>''' % i )
_a : str = end
return result
def __lowercase ( self , _a , _a="\n" ) -> List[str]:
_a : Optional[int] = []
_a : str = []
_a : int = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_a ) > 0:
words.append(bytearray(_a ).decode('''utf-8''' , errors='''replace''' ) )
_a : List[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(_a )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(_a )
if len(_a ) > 0:
words.append(bytearray(_a ).decode('''utf-8''' , errors='''replace''' ) )
_a : str = ''''''.join(_a )
return text
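# Round-trip sketch using the checkpoint named in this snippet's pretrained
# map (public class name assumed for the obfuscated one):
#
#     from transformers import GPTNeoXJapaneseTokenizer
#
#     tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tok("こんにちは、世界")["input_ids"]
#     print(tok.decode(ids))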
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_xmod'''] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule (scheduler , num_steps=10 ):
lrs = []
for _ in range(num_steps ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def unwrap_and_save_reload_schedule (scheduler , num_steps=10 ):
lrs = []
for step in range(num_steps ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname , """schedule.bin""" )
torch.save(scheduler.state_dict() , file_name )
state_dict = torch.load(file_name )
scheduler.load_state_dict(state_dict )
return lrs
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def assertListAlmostEqual ( self , list1 , list2 , tol ) -> int:
self.assertEqual(len(list1 ) , len(list2 ) )
for a, b in zip(list1 , list2 ):
self.assertAlmostEqual(a , b , delta=tol )
def test_adam_w ( self ) -> Union[str, Any]:
w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
target = torch.tensor([0.4, 0.2, -0.5] )
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
loss = criterion(w , target )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def test_adafactor ( self ) -> Optional[Any]:
w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
target = torch.tensor([0.4, 0.2, -0.5] )
criterion = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
optimizer = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
for _ in range(1_000 ):
loss = criterion(w , target )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
m = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
num_steps = 1_0
def assertListAlmostEqual ( self , list1 , list2 , tol , msg=None ) -> str:
self.assertEqual(len(list1 ) , len(list2 ) )
for a, b in zip(list1 , list2 ):
self.assertAlmostEqual(a , b , delta=tol , msg=msg )
def test_schedulers ( self ) -> str:
common_kwargs = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
kwargs , expected_learning_rates = data
scheduler = scheduler_func(self.optimizer , **kwargs )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lrs_1 = unwrap_schedule(scheduler , self.num_steps )
self.assertListAlmostEqual(
lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
scheduler = scheduler_func(self.optimizer , **kwargs )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
self.assertListEqual(lrs_1 , lrs_2 , msg=f"""failed for {scheduler_func} in save and reload""" )
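For context, a minimal sketch of how these schedules are driven in practice (standard transformers API, with a tiny stand-in model):

sketch_model = nn.Linear(50, 50)
sketch_optimizer = AdamW(sketch_model.parameters(), lr=10.0)
sketch_scheduler = get_linear_schedule_with_warmup(
    sketch_optimizer, num_warmup_steps=2, num_training_steps=10
)
for _ in range(10):
    sketch_optimizer.step()   # apply gradients (gradient computation omitted here)
    sketch_scheduler.step()   # lr then follows the expected_learning_rates above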
class LambdaScheduleWrapper :
'''simple docstring'''
def __init__( self , fn ) -> Dict:
self.fn = fn
def __call__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
return self.fn(*snake_case_ , **snake_case_ )
@classmethod
def wrap_scheduler ( cls , scheduler ) -> Tuple:
scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 301 |
"""simple docstring"""
import os
from pathlib import Path
def lowercase ():
from torch.utils.cpp_extension import load
root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
src_files = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , _lowerCAmelCase , with_cuda=_lowerCAmelCase , extra_include_paths=[str(_lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
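A hedged usage sketch: calling the loader JIT-compiles the extension on first use (a CUDA toolchain is required). The op names on the returned module are an assumption here, based on how deformable-attention kernels are typically exposed, not confirmed by this file.

MSDA = lowercase()  # compiles and caches the MultiScaleDeformableAttention extension
# Assumed interface: MSDA.ms_deform_attn_forward(...) / MSDA.ms_deform_attn_backward(...)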
| 301 | 1 |
from string import ascii_lowercase, ascii_uppercase
def SCREAMING_SNAKE_CASE__ ( sentence ) -> str:
'''simple docstring'''
if not sentence:
return ""
lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
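A quick illustration of the behaviour: only the first character is mapped through lower_to_upper, everything else passes through unchanged.

assert SCREAMING_SNAKE_CASE__("hello world") == "Hello world"
assert SCREAMING_SNAKE_CASE__("123 hello") == "123 hello"  # non-letter first char is kept
assert SCREAMING_SNAKE_CASE__("") == ""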
| 323 |
import math
def solution ( n : int = 100 ) -> int:
'''simple docstring'''
sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'{solution() = }')
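A worked check using n = 10, the example stated in Project Euler problem 6: the sum of squares is 385, the square of the sum is 55² = 3025, and the difference is 2640; the default n = 100 gives 25164150.

assert solution(10) == 2_640        # 3025 - 385
assert solution() == 25_164_150     # n = 100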
| 323 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __magic_name__ ( A : int, A : List[str] ):
'''simple docstring'''
a = old_name
if "patch_embed" in old_name:
a , a , a = old_name.split("." )
if layer == "0":
a = old_name.replace("0", "convolution1" )
elif layer == "1":
a = old_name.replace("1", "batchnorm_before" )
elif layer == "3":
a = old_name.replace("3", "convolution2" )
else:
a = old_name.replace("4", "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d", A ):
a = R"\b\d{2}\b"
if bool(re.search(A, A ) ):
a = re.search(R"\d\.\d\d.", A ).group()
else:
a = re.search(R"\d\.\d.", A ).group()
if int(match[0] ) < 6:
a = old_name.replace(A, "" )
a = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1] )
a = "intermediate_stages." + trimmed_name
else:
a = old_name.replace(A, "" )
if int(match[2] ) < num_meta4D_last_stage:
a = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2] )
else:
a = str(int(match[2] ) - num_meta4D_last_stage )
a = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
a = trimmed_name.replace("norm1", "layernorm1" )
elif "norm2" in old_name:
a = trimmed_name.replace("norm2", "layernorm2" )
elif "fc1" in old_name:
a = trimmed_name.replace("fc1", "linear_in" )
elif "fc2" in old_name:
a = trimmed_name.replace("fc2", "linear_out" )
a = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d.", A ):
a = old_name.replace("network", "intermediate_stages" )
if "fc" in new_name:
a = new_name.replace("fc", "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a = new_name.replace("norm1", "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a = new_name.replace("norm2", "batchnorm_after" )
if "proj" in new_name:
a = new_name.replace("proj", "projection" )
if "dist_head" in new_name:
a = new_name.replace("dist_head", "distillation_classifier" )
elif "head" in new_name:
a = new_name.replace("head", "classifier" )
elif "patch_embed" in new_name:
a = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a = new_name.replace("norm", "layernorm" )
a = "efficientformer." + new_name
else:
a = "efficientformer.encoder." + new_name
return new_name
def __magic_name__ ( A : Any, A : List[Any] ):
'''simple docstring'''
for key in checkpoint.copy().keys():
a = checkpoint.pop(A )
a = val
return checkpoint
def __magic_name__ ( ):
'''simple docstring'''
a = "http://images.cocodataset.org/val2017/000000039769.jpg"
a = Image.open(requests.get(A, stream=A ).raw )
return image
def __magic_name__ ( A : Path, A : Path, A : Path, A : bool ):
'''simple docstring'''
a = torch.load(A, map_location="cpu" )["model"]
a = EfficientFormerConfig.from_json_file(A )
a = EfficientFormerForImageClassificationWithTeacher(A )
a = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
a = config.depths[-1] - config.num_meta3d_blocks + 1
a = convert_torch_checkpoint(A, A )
model.load_state_dict(A )
model.eval()
a = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
a = prepare_img()
a = 256
a = 224
a = EfficientFormerImageProcessor(
size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
a = processor(images=A, return_tensors="pt" ).pixel_values
# original processing pipeline
a = Compose(
[
Resize(A, interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(A ),
ToTensor(),
Normalize(A, A ),
] )
a = image_transforms(A ).unsqueeze(0 )
assert torch.allclose(A, A )
a = model(A )
a = outputs.logits
a = (1, 1000)
if "l1" in model_name:
a = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10], A, atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
a = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10], A, atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
a = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(A )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message="Add model", use_temp_dir=A, )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message="Add image processor", use_temp_dir=A, )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
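A hypothetical invocation of this conversion script; the script filename and every path below are placeholders, not values taken from the repository:

# python convert_efficientformer_checkpoint.py \
#     --pytorch_model_path ./efficientformer_l1.pth \
#     --config_file ./efficientformer_l1_config.json \
#     --pytorch_dump_path ./efficientformer-l1 \
#     --no-push_to_hub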
| 107 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase_ (Pipeline ):
def __init__( self : int , *args : Tuple , **kwargs : List[str] ) -> Optional[Any]:
super().__init__(*args , **kwargs )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _SCREAMING_SNAKE_CASE ( self : str , top_k : Optional[int]=None ) -> List[Any]:
postprocess_params = {}
if top_k is not None:
postprocess_params["top_k"] = top_k
return {}, {}, postprocess_params
def __call__( self : str , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs : Any ) -> Tuple:
return super().__call__(images , **kwargs )
def _SCREAMING_SNAKE_CASE ( self : str , image : str ) -> Any:
image = load_image(image )
model_inputs = self.image_processor(images=image , return_tensors=self.framework )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] , model_inputs : Dict ) -> str:
model_outputs = self.model(**model_inputs )
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Tuple , model_outputs : Union[str, Any] , top_k : Optional[int]=5 ) -> Any:
if top_k > self.model.config.num_labels:
top_k = self.model.config.num_labels
if self.framework == "pt":
probs = model_outputs.logits.softmax(-1 )[0]
scores , ids = probs.topk(top_k )
elif self.framework == "tf":
probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
topk = tf.math.top_k(probs , k=top_k )
scores , ids = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
scores = scores.tolist()
ids = ids.tolist()
return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 268 | 0 |
__magic_name__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 152 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch (tf_checkpoint_path : str , albert_config_file : str , pytorch_dump_path : str ):
# Initialise PyTorch model
config = AlbertConfig.from_json_file(albert_config_file )
print(F"""Building PyTorch model from configuration: {config}""" )
model = AlbertForPreTraining(config )
# Load weights from tf checkpoint
load_tf_weights_in_albert(model , config , tf_checkpoint_path )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowercase : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
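A hypothetical invocation; the script filename follows the usual transformers naming convention and all paths are placeholders:

# python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin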
| 27 |
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory :
"""simple docstring"""
def __init__( self ) -> str:
'''simple docstring'''
self.process = psutil.Process()
self.peak_monitoring = False
def peak_monitor( self ) -> str:
'''simple docstring'''
self.cpu_memory_peak = -1
while True:
self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def start( self ) -> int:
'''simple docstring'''
self.peak_monitoring = True
self.thread = threading.Thread(target=self.peak_monitor )
self.thread.daemon = True
self.thread.start()
def stop( self ) -> str:
'''simple docstring'''
self.peak_monitoring = False
self.thread.join()
return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure ( ):
"""simple docstring"""
measures = {'''time''': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
measures['''cpu'''] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
measures[str(i )] = torch.cuda.memory_allocated(i )
torch.cuda.reset_peak_memory_stats()
return measures
def end_measure ( start_measures ):
"""simple docstring"""
measures = {'''time''': time.time() - start_measures['''time''']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
measures['''cpu'''] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
measures['''cpu-peak'''] = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
measures[F"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
return measures
def log_measures ( measures , description ):
"""simple docstring"""
print(F"{description}:" )
print(F"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(F"- GPU {i} allocated: {measures[str(i )]:.2f}MiB" )
peak = measures[F"{i}-peak"]
print(F"- GPU {i} peak: {peak:.2f}MiB" )
print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 210 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300 # TEMPERATURE (unit = K)
def lowercase_ ( donor_conc : float , acceptor_conc : float , intrinsic_conc : float , ):
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive")
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive")
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive")
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration")
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration")
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
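An illustrative call with silicon-like carrier concentrations (the numbers are assumed for the example, not taken from the source): kT/q at 300 K is about 0.0259 V and ln((1e17 · 1e17) / (1.5e10)²) ≈ 31.4, so the built-in voltage comes out near 0.81 V.

v_bi = lowercase_(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
print(f"built-in voltage ≈ {v_bi:.3f} V")  # ~0.812 V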
| 333 |
def cocktail_shaker_sort ( unsorted : list ):
for i in range(len(unsorted ) - 1 , 0 , -1 ):
swapped = False
for j in range(i , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
swapped = True
for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
swapped = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input('''Enter numbers separated by a comma:\n''').strip()
unsorted = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger ( ) -> logging.Logger:
global _logger
_logger = _logger or logging.getLogger(__name__ )
return _logger
class Timeout (TimeoutError ):
"""simple docstring"""
def __init__( self : Dict , __a : List[str] ):
_a = lock_file
return None
def __str__( self : Optional[Any] ):
_a = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class _Acquire_ReturnProxy :
"""simple docstring"""
def __init__( self : Any , __a : Optional[Any] ):
_a = lock
return None
def __enter__( self : Union[str, Any] ):
return self.lock
def __exit__( self : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ):
self.lock.release()
return None
class BaseFileLock :
"""simple docstring"""
def __init__( self : Optional[Any] , __a : int , __a : List[Any]=-1 , __a : List[str]=None ):
_a = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_a = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
_a = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
_a = None
# The default timeout value.
_a = timeout
# We use this lock primarily for the lock counter.
_a = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
_a = 0
return None
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self._lock_file
@property
def UpperCamelCase__ ( self : str ):
return self._timeout
@timeout.setter
def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[Any] ):
_a = float(__a )
return None
def UpperCamelCase__ ( self : List[str] ):
raise NotImplementedError()
def UpperCamelCase__ ( self : Optional[Any] ):
raise NotImplementedError()
@property
def UpperCamelCase__ ( self : List[str] ):
return self._lock_file_fd is not None
def UpperCamelCase__ ( self : Dict , __a : Any=None , __a : Union[str, Any]=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_a = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase__ ( self : Optional[int] , __a : Optional[int]=False ):
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_a = id(self )
_a = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
_a = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : str , __a : Optional[int] ):
self.release()
return None
def __del__( self : Union[str, Any] ):
self.release(force=__a )
return None
def UpperCamelCase__ ( self : str , __a : str , __a : int ):
_a = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
_a = os.path.dirname(__a )
_a = str(hash(__a ) )
_a = filename[: max_length - len(__a ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(__a , __a )
else:
return path
class WindowsFileLock (BaseFileLock ):
"""simple docstring"""
def __init__( self : Tuple , __a : Tuple , __a : Union[str, Any]=-1 , __a : List[str]=None ):
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
_a = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def UpperCamelCase__ ( self : Optional[int] ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : int ):
_a = self._lock_file_fd
_a = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock (BaseFileLock ):
"""simple docstring"""
def __init__( self : Optional[Any] , __a : List[Any] , __a : str=-1 , __a : List[Any]=None ):
_a = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def UpperCamelCase__ ( self : Any ):
_a = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_a = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
_a = fd
return None
def UpperCamelCase__ ( self : Dict ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_a = self._lock_file_fd
_a = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class SoftFileLock (BaseFileLock ):
"""simple docstring"""
def UpperCamelCase__ ( self : int ):
_a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_a = os.open(self._lock_file , __a )
except OSError:
pass
else:
_a = fd
return None
def UpperCamelCase__ ( self : Any ):
os.close(self._lock_file_fd )
_a = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
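Typical use follows the upstream py-filelock API; a minimal sketch (the lock path is illustrative):

lock = FileLock("shared_resource.txt.lock", timeout=5)
with lock:  # blocks until acquired, or raises Timeout after 5 seconds
    pass    # read/write shared_resource.txt exclusively here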
| 63 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a_ : Dict = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a_ : int = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a_ : int = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["""input_ids""", """attention_mask"""]
_lowerCAmelCase = BartTokenizer
def __init__( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="replace" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="</s>" , __magic_name__="<s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__="<mask>" , __magic_name__=False , __magic_name__=True , **__magic_name__ , ) -> List[Any]:
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
_a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __magic_name__ ) != add_prefix_space:
_a = getattr(__magic_name__ , pre_tok_state.pop('type' ) )
_a = add_prefix_space
_a = pre_tok_class(**__magic_name__ )
_a = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a = 'post_processor'
_a = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
if tokenizer_component_instance:
_a = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_a = tuple(state['sep'] )
if "cls" in state:
_a = tuple(state['cls'] )
_a = False
if state.get('add_prefix_space' , __magic_name__ ) != add_prefix_space:
_a = add_prefix_space
_a = True
if state.get('trim_offsets' , __magic_name__ ) != trim_offsets:
_a = trim_offsets
_a = True
if changes_to_apply:
_a = getattr(__magic_name__ , state.pop('type' ) )
_a = component_class(**__magic_name__ )
setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
@property
def __UpperCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self , __magic_name__ ) -> Union[str, Any]:
_a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
_a = value
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> BatchEncoding:
_a = kwargs.get('is_split_into_words' , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> BatchEncoding:
_a = kwargs.get('is_split_into_words' , __magic_name__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
_a = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__=None ) -> Any:
_a = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
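Standard usage through the public class name (the checkpoint id is one of those listed in the maps above):

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tok("Hello world", return_tensors="pt")
# input_ids are wrapped as <s> ... </s>, per build_inputs_with_special_tokens above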
| 168 | 0 |
'''simple docstring'''
def UpperCamelCase__ ( input_str ):
"""simple docstring"""
bitmap = 0
for ch in input_str:
ch_unicode = ord(ch )
ch_bit_index_on = pow(2 , ch_unicode )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
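Behaviour of the bitmap check above: the bit for each code point is set once, and a second occurrence is caught by the shift-and-mask test.

assert UpperCamelCase__("abcde")        # all characters distinct
assert not UpperCamelCase__("aab")      # 'a' repeats, its bit is already set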
| 365 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def lowercase__ ( *__snake_case : Optional[Any] , **__snake_case : Any ) -> Tuple:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
_lowercase: Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowercase__ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowercase__ ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = vqa_pipeline(__snake_case , top_k=1 )
self.assertEqual(
__snake_case , [
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
] , )
@require_torch
def lowercase__ ( self : str ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
@slow
@require_torch
def lowercase__ ( self : List[Any] ) -> List[str]:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
pass
| 220 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCamelCase : List[str] = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
lowerCamelCase : str = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
lowerCamelCase : Optional[int] = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def _info ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def _compute ( self : List[Any] , predictions : Union[str, Any] , references : Optional[int] , rouge_types : str=None , use_aggregator : int=True , use_stemmer : Tuple=False ) -> List[str]:
'''simple docstring'''
if rouge_types is None:
rouge_types = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
if use_aggregator:
aggregator = scoring.BootstrapAggregator()
else:
scores = []
for ref, pred in zip(references , predictions ):
score = scorer.score(ref , pred )
if use_aggregator:
aggregator.add_scores(score )
else:
scores.append(score )
if use_aggregator:
result = aggregator.aggregate()
else:
result = {}
for key in scores[0]:
result[key] = [score[key] for score in scores]
return result
| 47 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCAmelCase_ : Optional[int] = """src/transformers"""
UpperCAmelCase_ : Tuple = """docs/source/en"""
UpperCAmelCase_ : Optional[Any] = """."""
def _A (__a , __a , __a ) -> Dict:
"""simple docstring"""
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while not lines[start_index].startswith(__a ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Tuple = start_index
while not lines[end_index].startswith(__a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
def _A (__a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a )
return [m.group(0 ) for m in matches]
def _A (__a , __a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a )
SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2
SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a )
# Let's lookup through all transformers object (once).
for attr_name in dir(__a ):
SCREAMING_SNAKE_CASE_ : Any = None
if attr_name.endswith('''Tokenizer''' ):
SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers
SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers
SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ : int = tf_models
SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ : Any = flax_models
SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ : str = pt_models
SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_name_to_prefix.values():
SCREAMING_SNAKE_CASE_ : List[str] = True
break
# Try again after removing the last word in the name
SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] )
# Let's build that table!
SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns]
SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2
# Build the table per se
SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''}
for name in model_names:
SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name]
SCREAMING_SNAKE_CASE_ : int = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n"
return table
def _A (__a=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file(
filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Any = parser.parse_args()
check_model_table(args.fix_and_overwrite)
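As the header comment says, the script is meant to be run from the repository root; a hypothetical invocation:

# python utils/check_table.py                      # fails if the model table is stale
# python utils/check_table.py --fix_and_overwrite  # rewrites docs/source/en/index.md in place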
| 91 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Any = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class __SCREAMING_SNAKE_CASE ( snake_case__):
_SCREAMING_SNAKE_CASE : Dict = '''unispeech'''
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio(self):
        """Overall downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
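
# Minimal usage sketch (illustrative, not part of the original module): with the
# defaults above, the feature extractor downsamples raw audio by the product of
# `conv_stride`:
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 5 * 2 * 2 * 2 * 2 * 2 * 2  # == 320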
| 365 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
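
# Note: the tester above deliberately builds a tiny ResNet (4 stages, widths
# 10-40, depths 1-2) so that the shape checks below run in milliseconds on CPU.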
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 122 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)
    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 75 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem by locking this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
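
# Example (illustrative): every rank can now print concurrently without
# interleaving, e.g.
#   printflock(f"checkpoint reached on {socket.gethostname()}")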
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 234 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
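
# Illustrative usage (mirrors how the menu module later in this dump consumes
# this file): wrap terminal redraws so the cursor does not flicker.
#
#     with hide():
#         ...  # redraw terminal lines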
| 365 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def a_ ( lowerCamelCase : "RagExampleArguments" , lowerCamelCase : "ProcessingArguments" , lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase = dataset.map(lowerCamelCase , batched=lowerCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowerCamelCase )
lowerCAmelCase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase = dataset.map(
partial(lowerCamelCase , ctx_encoder=lowerCamelCase , ctx_tokenizer=lowerCamelCase ) , batched=lowerCamelCase , batch_size=processing_args.batch_size , features=lowerCamelCase , )
# And finally save your dataset
lowerCAmelCase = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(lowerCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=lowerCamelCase )
# And save the index
lowerCAmelCase = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(lowerCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
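
# Illustrative follow-up (variable names such as `question_embedding` are
# hypothetical): the passages and index saved above can be reloaded for
# retrieval with the `datasets` API, e.g.
#
#     from datasets import load_from_disk
#     dataset = load_from_disk(passages_path)
#     dataset.load_faiss_index("embeddings", index_path)
#     scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)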
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__snake_case =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__snake_case , __snake_case , __snake_case =parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__snake_case =rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 55 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging if TF is installed
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 28 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if `string` can be segmented into a sequence of one or more
    `words` (words may be reused), False otherwise.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
    False
    >>> word_break("cars", ["car", "ca", "rs"])
    True
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
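
# Illustrative usage: the trie walk reuses dictionary words, so
# "pineapplepenapple" segments as "pine apple pen apple":
#
#     assert word_break("pineapplepenapple", ["apple", "pen", "applepen", "pine", "pineapple"])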
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 312 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
__snake_case = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
__snake_case = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
) -> Dict:
    """Collect the mention/cluster information needed by the CoVal evaluator."""
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span) -> Optional[Any]:
    """Run every requested coreference metric and aggregate the CoNLL score."""
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
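
# Note: the `conll_score` computed above is the unweighted mean of the MUC,
# B-cubed and CEAFe F1 values, rescaled to 0-100:
#   conll_score = 100 * (f1_muc + f1_bcub + f1_ceafe) / 3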
def check_gold_parse_annotation(key_lines) -> bool:
    """Return True if the key file carries gold parse information (column 6)."""
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 169 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")

        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()

                    self.write_choice(choice, "\n")
                    return choice
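
# Illustrative usage (hypothetical choices): render a menu and get back the
# selected index.
#
#     menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
#     choice = menu.run(default_choice=0)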
| 169 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX Japanese, based on a special sub-word encoding that handles
    character fluctuations unique to Japanese.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )

        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
def UpperCamelCase_ ( self : Any ,A : str ,A : Optional[str] = None ):
__A = 0
if os.path.isdir(A ):
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__A = os.path.join(
A ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
__A = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
__A = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(A ,"w" ,encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
__A = token_index
writer.write(",".join(A ) + "\n" )
index += 1
with open(A ,"w" ,encoding="utf-8" ) as writer:
json.dump(self.emoji ,A )
return vocab_file, emoji_file
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A : int ,A : Union[str, Any] ,A : Optional[Any] ):
__A = vocab # same as swe
__A = ids_to_tokens # same as bpe
__A = emoji
__A = np.max([len(A ) for w in self.vocab.keys()] )
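# Precompiled patterns used by the text cleaner to replace URLs, e-mail
# addresses, phone numbers, Western and Japanese-era dates, and prices
# with special tokens such as <URL>, <EMAIL>, <TEL>, <DATE>, and <PRICE>.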
__A = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
__A = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
__A = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
__A = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
__A = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
__A = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
__A = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
__A = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
__A = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : Union[str, Any] ):
return len(self.ids_to_tokens )
def UpperCamelCase_ ( self : Union[str, Any] ,A : Any ):
__A = self.content_repattera.sub("<URL>" ,A )
__A = self.content_repattera.sub("<EMAIL>" ,A )
__A = self.content_repattera.sub("<TEL>" ,A )
__A = self.content_repattera.sub("<DATE>" ,A )
__A = self.content_repattera.sub("<DATE>" ,A )
__A = self.content_repattera.sub("<PRICE>" ,A )
__A = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__A = content.replace("<BLOCK><BLOCK>" ,"<BLOCK>" )
return content
def UpperCamelCase_ ( self : str ,A : List[str] ,A : Optional[Any]=False ):
__A = text.replace(" " ,"<SP>" )
__A = text.replace(" " ,"<SP>" )
__A = text.replace("\r\n" ,"<BR>" )
__A = text.replace("\n" ,"<BR>" )
__A = text.replace("\r" ,"<BR>" )
__A = text.replace("\t" ,"<TAB>" )
__A = text.replace("—" ,"ー" )
__A = text.replace("−" ,"ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
__A = text.replace(A ,A )
if clean:
__A = self.clean_text(A )
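# Helper predicates for the fallback path: check_simbol flags single characters
# whose UTF-8 encoding is a 2-byte sequence in selected symbol ranges (mapped to
# <KIGOU>), while checkuae flags 3-byte sequences in roughly the U+2000-U+2BFF
# block (mapped to <U2000U2BFF>).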
def check_simbol(A : str ):
__A = x.encode()
if len(A ) == 1 and len(A ) == 2:
__A = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(A : str ):
__A = x.encode()
if len(A ) == 1 and len(A ) == 3:
__A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
__A = 0
__A = []
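# Greedy longest-match loop: at each position, end offsets are tried from the
# farthest candidate down; a special '<...>' token matches eagerly, otherwise
# all vocabulary hits are collected and the one with the smallest token id is
# adopted. Unmatched characters fall back to <KIGOU>, <U2000U2BFF>, or raw
# byte tokens.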
while pos < len(A ):
__A = min(len(A ) ,pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
__A = [] # (token_id, token, pos)
for e in range(A ,A ,-1 ):
__A = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(A ) > 2:
__A = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(A ) > 0:
# the smallest token_id is adopted
__A , __A , __A = sorted(A ,key=lambda A : x[0] )[0]
result.append(A )
__A = e
else:
__A = pos + 1
__A = text[pos:end]
if check_simbol(A ):
result.append("<KIGOU>" )
elif checkuae(A ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
__A = end
return result
def UpperCamelCase_ ( self : Optional[Any] ,A : List[str] ,A : Any="\n" ):
__A = []
__A = []
__A = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(A ) > 0:
words.append(bytearray(A ).decode("utf-8" ,errors="replace" ) )
__A = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(A )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(A )
if len(A ) > 0:
words.append(bytearray(A ).decode("utf-8" ,errors="replace" ) )
__A = "".join(A )
return text
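# Hedged usage sketch, assuming the obfuscated class above resolves to the
# original GPTNeoXJapaneseTokenizer (the checkpoint name comes from the vocab
# map above); byte fallback should make the round trip lossless:
#
#     >>> tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     >>> ids = tok("吾輩は猫である")["input_ids"]
#     >>> tok.decode(ids)
#     '吾輩は猫である'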
| 15 |
import math
def UpperCAmelCase ( a_ , a_ = 0 , a_ = 0 ) -> list:
"""simple docstring"""
__A = end or len(a_ )
for i in range(a_ , a_ ):
__A = i
__A = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__A = array[temp_index - 1]
temp_index -= 1
__A = temp_index_value
return array
def UpperCAmelCase ( a_ , a_ , a_ ) -> None: # Max Heap
"""simple docstring"""
__A = index
__A = 2 * index + 1 # Left Node
__A = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__A = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__A = right_index
if largest != index:
__A , __A = array[largest], array[index]
heapify(a_ , a_ , a_ )
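# Heap sort: build a max heap bottom-up, then repeatedly swap the root with the
# last unsorted element and re-heapify the shrunken prefix.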
def UpperCAmelCase ( a_ ) -> list:
"""simple docstring"""
__A = len(a_ )
for i in range(n // 2 , -1 , -1 ):
heapify(a_ , a_ , a_ )
for i in range(n - 1 , 0 , -1 ):
__A , __A = array[0], array[i]
heapify(a_ , 0 , a_ )
return array
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> int:
"""simple docstring"""
__A = low
__A = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__A , __A = array[j], array[i]
i += 1
def UpperCAmelCase ( a_ ) -> list:
"""simple docstring"""
if len(a_ ) == 0:
return array
__A = 2 * math.ceil(math.loga(len(a_ ) ) )
__A = 1_6
return intro_sort(a_ , 0 , len(a_ ) , a_ , a_ )
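# Introsort driver: quicksort with a median-of-3 pivot, switching to heap sort
# once max_depth is exhausted and finishing small partitions (below
# size_threshold) with insertion sort.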
def UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(a_ )
max_depth -= 1
__A = median_of_a(a_ , a_ , start + ((end - start) // 2) + 1 , end - 1 )
__A = partition(a_ , a_ , a_ , a_ )
intro_sort(a_ , a_ , a_ , a_ , a_ )
__A = p
return insertion_sort(a_ , a_ , a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE :List[Any] = input('Enter numbers separated by a comma : ').strip()
SCREAMING_SNAKE_CASE :str = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
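# Doctest-style sketch of the expected behavior, assuming the obfuscated entry
# point above resolves to the original `sort` function:
#
#     >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#     [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]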
| 15 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
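# Reads a boolean flag from the environment: an unset key falls back to
# `default`; a set key must hold a yes/no-style value accepted by strtobool,
# otherwise a ValueError is raised.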
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : List[str]=False ) -> Optional[Any]:
try:
UpperCamelCase : List[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase : List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase : Optional[Any] = strtobool(snake_case__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__UpperCAmelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
def UpperCamelCase ( snake_case__ : int ) -> str:
return unittest.skip('Test was skipped' )(snake_case__ )
def UpperCamelCase ( snake_case__ : List[Any] ) -> Optional[Any]:
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(snake_case__ )
def UpperCamelCase ( snake_case__ : List[Any] ) -> Dict:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(snake_case__ )
def UpperCamelCase ( snake_case__ : List[Any] ) -> Dict:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> List[Any]:
return unittest.skipUnless(is_xpu_available() , 'test requires an XPU' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[Any] ) -> List[Any]:
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(snake_case__ )
def UpperCamelCase ( snake_case__ : List[str] ) -> Tuple:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[Any] ) -> List[Any]:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Dict ) -> List[str]:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Dict:
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires an XPU' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Tuple ) -> Any:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[Any] ) -> int:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Any:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Optional[int] ) -> Dict:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[Any]:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Dict=None , snake_case__ : Union[str, Any]=None ) -> Optional[Any]:
if test_case is None:
return partial(snake_case__ , version=snake_case__ )
return unittest.skipUnless(is_torch_version('>=' , snake_case__ ) , F"""test requires torch version >= {version}""" )(snake_case__ )
def UpperCamelCase ( snake_case__ : Dict ) -> Optional[Any]:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(snake_case__ )
def UpperCamelCase ( snake_case__ : str ) -> Tuple:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(snake_case__ )
def UpperCamelCase ( snake_case__ : Any ) -> List[Any]:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(snake_case__ )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def UpperCamelCase ( snake_case__ : Optional[Any] ) -> Optional[Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(snake_case__ )
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = True
@classmethod
def snake_case_ ( cls ) -> Any:
UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
@classmethod
def snake_case_ ( cls ) -> Tuple:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def snake_case_ ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Tuple:
UpperCamelCase : Optional[Any] = mocks if isinstance(SCREAMING_SNAKE_CASE_, (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
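# Gathers a tensor from every process and verifies each gathered copy matches
# the local value; used to assert cross-process consistency in distributed tests.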
def UpperCamelCase ( snake_case__ : Tuple ) -> Optional[int]:
UpperCamelCase : Tuple = AcceleratorState()
UpperCamelCase : Tuple = tensor[None].clone().to(state.device )
UpperCamelCase : str = gather(snake_case__ ).cpu()
UpperCamelCase : Union[str, Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , snake_case__ ):
return False
return True
class lowerCAmelCase_ :
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCamelCase : List[Any] = returncode
UpperCamelCase : Tuple = stdout
UpperCamelCase : Any = stderr
async def UpperCamelCase ( snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Union[str, Any]:
while True:
UpperCamelCase : List[str] = await stream.readline()
if line:
callback(snake_case__ )
else:
break
async def UpperCamelCase ( snake_case__ : Dict , snake_case__ : int=None , snake_case__ : Dict=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=False , snake_case__ : Union[str, Any]=False ) -> _RunOutput:
if echo:
print('\nRunning: ' , ' '.join(snake_case__ ) )
UpperCamelCase : Dict = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=snake_case__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=snake_case__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase : Tuple = []
UpperCamelCase : int = []
def tee(snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any]="" ):
UpperCamelCase : Union[str, Any] = line.decode('utf-8' ).rstrip()
sink.append(snake_case__ )
if not quiet:
print(snake_case__ , snake_case__ , file=snake_case__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda snake_case__ : tee(snake_case__ , snake_case__ , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda snake_case__ : tee(snake_case__ , snake_case__ , sys.stderr , label='stderr:' ) ) ),
] , timeout=snake_case__ , )
return _RunOutput(await p.wait() , snake_case__ , snake_case__ )
def UpperCamelCase ( snake_case__ : Dict , snake_case__ : Any=None , snake_case__ : Tuple=None , snake_case__ : Any=180 , snake_case__ : Any=False , snake_case__ : Optional[int]=True ) -> _RunOutput:
UpperCamelCase : int = asyncio.get_event_loop()
UpperCamelCase : Tuple = loop.run_until_complete(
_stream_subprocess(snake_case__ , env=snake_case__ , stdin=snake_case__ , timeout=snake_case__ , quiet=snake_case__ , echo=snake_case__ ) )
UpperCamelCase : str = ' '.join(snake_case__ )
if result.returncode > 0:
UpperCamelCase : Union[str, Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
class lowerCAmelCase_ ( a__ ):
pass
def UpperCamelCase ( snake_case__ : List[str] , snake_case__ : str=False ) -> int:
try:
UpperCamelCase : Union[str, Any] = subprocess.check_output(snake_case__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(snake_case__ , 'decode' ):
UpperCamelCase : Optional[int] = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{" ".join(snake_case__ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 368 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( a__ ):
@staticmethod
@abstractmethod
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> int:
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self ) -> str:
raise NotImplementedError()
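# Minimal sketch of the intended subclassing pattern; the names below are
# hypothetical, assuming the base mirrors accelerate's BaseCLICommand:
#
#     class EnvCommand(BaseCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             parser.add_parser("env")
#
#         def run(self):
#             print("environment info")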
| 103 | 0 |