code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module-level logger and pretrained-config archive map. The original bound
# both to the same name (_UpperCamelCase), so the logger was immediately
# clobbered by the dict; give each object its own name.
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class a__(PretrainedConfig):
    """Configuration for a BlenderbotSmall (BART-style) encoder-decoder model.

    Defaults reproduce facebook/blenderbot_small-90M. NOTE(review): the base
    class in the garbled original was the undefined name ``lowerCamelCase__``;
    ``PretrainedConfig`` (imported above) is the intended parent. The original
    ``__init__`` reused a single parameter name for all 24 arguments (invalid
    Python) and assigned every value to a local instead of ``self``; names are
    restored from the defaults' order.
    """

    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class a__(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for BlenderbotSmall.

    NOTE(review): the base class in the garbled original was the undefined
    name ``lowerCamelCase__``; ``OnnxSeqaSeqConfigWithPast`` (imported above)
    is the intended parent. Every method was named ``lowercase`` (each def
    shadowing the previous), every local was ``lowercase`` and every reference
    was the undefined ``__snake_case``; names are restored from the internal
    call sites that survived obfuscation (e.g. the calls to
    ``_generate_dummy_inputs_for_sequence_classification_and_question_answering``
    and ``_generate_dummy_inputs_for_default_and_seqaseq_lm``).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the model inputs per export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                # With cached past key values the decoder receives one new token.
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the model outputs per export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Skip the seq2seq-specific handling and use the plain-with-past one.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and optional past key values)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs: one token only when past key values are used.
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length )], dim=1 )
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers )
            max_num_layers = max(num_encoder_layers, num_decoder_layers ) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and optional past key values)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype )], dim=1 )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch with effective (fixed) batch/sequence sizes."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework ) )
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t) -> None:
        """Flatten past key values, seq2seq-style for the default tasks."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t )
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
| 255
|
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row JSON fixture.

    Renamed from the invalid ``lowerCamelCase__(_A, _A)`` (duplicate parameter
    names) to match the call sites below (e.g. after each JsonDatasetReader).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading honours keep_in_memory: Arrow memory grows only when True.

    Parameter names restored so pytest can inject the fixtures.
    """
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """An explicit Features mapping overrides the inferred dtypes."""
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
    ] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order of the file (col_3, col_1, col_2) is preserved on read."""
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requesting a column order different from the file reorders the columns."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The split argument is recorded on the resulting Dataset (default: train)."""
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , split=split ).read()
    _check_json_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """The reader accepts a single path or a list of paths."""
    if issubclass(path_type , str ):
        path = jsonl_path
    elif issubclass(path_type , list ):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row JSON fixture."""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """DatasetDict reading honours keep_in_memory."""
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """An explicit Features mapping overrides the inferred dtypes (DatasetDict)."""
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path} , features=features , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each key of the path mapping becomes a split of the DatasetDict."""
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    """Deserialize a whole JSON document from a readable buffer."""
    return json.load(buffer)
def load_json_lines(buffer):
    """Deserialize a JSON-Lines buffer: one JSON document per line.

    Bug fix: the garbled original called ``json.loads`` on the buffer argument
    instead of on each ``line`` of the iteration.
    """
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    """End-to-end tests for JsonDatasetWriter.

    Renamed from the unreadable ``a__`` so pytest collects the class; the
    ``dataset`` argument of each method is a pytest fixture.
    """

    @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
            assert isinstance(exported_content , list )
            assert isinstance(exported_content[0] , dict )
            assert len(exported_content ) == 10

    @pytest.mark.parametrize(
        'orient, container, keys, len_at' , [
            ('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
            ('split', dict, {'columns', 'data'}, 'data'),
            ('index', dict, set('0123456789' ), None),
            ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
            ('values', list, None, None),
            ('table', dict, {'schema', 'data'}, 'data'),
        ] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
            assert isinstance(exported_content , container )
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(exported_content , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
            if len_at:
                assert len(exported_content[len_at] ) == 10
            else:
                assert len(exported_content ) == 10

    @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
            assert isinstance(exported_content , list )
            assert isinstance(exported_content[0] , dict )
            assert len(exported_content ) == 10

    @pytest.mark.parametrize(
        'orient, container, keys, len_at' , [
            ('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
            ('split', dict, {'columns', 'data'}, 'data'),
            ('index', dict, set('0123456789' ), None),
            ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
            ('values', list, None, None),
            ('table', dict, {'schema', 'data'}, 'data'),
        ] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
            assert isinstance(exported_content , container )
            if keys:
                if container is dict:
                    assert exported_content.keys() == keys
                else:
                    assert exported_content[0].keys() == keys
            else:
                assert not hasattr(exported_content , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
            if len_at:
                assert len(exported_content[len_at] ) == 10
            else:
                assert len(exported_content ) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer; NOTE(review): the original
        # raised an obfuscated name — ValueError matches the writer contract.
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset , buffer , num_proc=0 )

    @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
        original_path = str(shared_datadir / F"""test_file.json.{extension}""" )
        JsonDatasetWriter(dataset , path , compression=compression ).write()
        with fsspec.open(path , 'rb' , compression='infer' ) as f:
            exported_content = f.read()
        with fsspec.open(original_path , 'rb' , compression='infer' ) as f:
            original_content = f.read()
        assert exported_content == original_content
| 297
| 0
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    """patch_submodule replaces os.path.join everywhere it is reachable in _test_patching."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """A builtin present in the module's globals (open) can be patched too."""
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    """Patching an attribute of a module that _test_patching never imports is a no-op."""
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
        pass
def test_patch_submodule_missing_builtin():
    """A builtin absent from the module's globals still gets patched and restored."""
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    """patch_submodule also works via explicit start()/stop() calls."""
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    """Nested patches on os.path.join / os.path.dirname / os.rename compose in any order."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """Patching attributes/modules that don't exist at all must not raise."""
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
        pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
        pass
| 210
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product="laptop"):
    """Scrape Amazon.in search results for ``product`` into a DataFrame.

    Renamed from ``lowerCamelCase__`` to match the call under
    ``if __name__ == "__main__"``. NOTE(review): ``item.ha`` in the garbled
    original is ``item.h2`` (the result-card heading tag); the two trailing
    ``' '`` assignments are the upstream guards that blank out MRP/Discount
    rows where the listed price exceeds the MRP — confirm against live markup,
    Amazon changes its HTML frequently.
    """
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
            try:
                product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹' ).replace(',' , '' ) )
                            - float(product_price.strip('₹' ).replace(',' , '' ) )
                        )
                        / float(product_mrp.strip('₹' ).replace(',' , '' ) )
                    )
                    * 100 )
            except ValueError:
                discount = float('nan' )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out inconsistent rows (price higher than MRP)
        data_frame.loc[
            data_frame['Current Price of the product'] > data_frame['MRP of the product'], 'MRP of the product'
        ] = ' '
        data_frame.loc[
            data_frame['Current Price of the product'] > data_frame['MRP of the product'], 'Discount'
        ] = ' '
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # The original assigned the query to a variable named ``lowerCAmelCase``
    # while both uses below reference ``product`` — restore the real name.
    product = 'headphones'
    get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 297
| 0
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s). The original bound both this constant and the sympy
# symbols to the same name ``__A`` (the second assignment clobbered the
# first), while the functions below reference ``c`` and ``ct, x, y, z``.
c = 299_792_458

# Symbols: the components of a symbolic four-vector
ct, x, y, z = symbols('ct x y z')
def beta(velocity):
    """Return v/c for a speed in m/s, validating 1 <= velocity <= c.

    Renamed from ``lowercase`` to match the calls in gamma() and
    transformation_matrix() below.
    """
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma(velocity):
    """Return the Lorentz factor 1 / sqrt(1 - (v/c)^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2 )
def transformation_matrix(velocity):
    """Return the 4x4 Lorentz boost matrix along the x-axis for ``velocity``."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity, event=None):
    """Boost ``event`` by ``velocity``; defaults to the symbolic four-vector."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector: the original bound every result to ``__A``
    # while the f-strings below reference four_vector / sub_dict /
    # numerical_vector — restore those names.
    four_vector = transform(29_979_245)
    print('''Example of four vector: ''')
    print(F"""ct' = {four_vector[0]}""")
    print(F"""x' = {four_vector[1]}""")
    print(F"""y' = {four_vector[2]}""")
    print(F"""z' = {four_vector[3]}""")
    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F"""\n{numerical_vector}""")
| 33
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Fast (tiny-model, CPU-sized) tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): the base names ``lowerCamelCase__`` are not defined in this
    file as shown — presumably they stand for the pipeline tester mixins
    imported above (PipelineLatentTesterMixin etc.); verify against upstream.
    """

    # NOTE(review): all five class attributes were bound to the single
    # throwaway name ``lowercase__`` by the generator, so only the last
    # survives; upstream these are pipeline_class / params / batch_params /
    # image_params / image_latents_params — TODO confirm.
    lowercase__ = StableUnCLIPImgaImgPipeline
    lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    lowercase__ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowercase__ = frozenset([] )

    def lowercase_ ( self : int ):
        """Build the dictionary of tiny model components for the pipeline.

        NOTE(review): every assignment below targets the local ``a``, yet the
        closing dict reads ``feature_extractor``/``image_encoder``/… and
        ``embedder_hidden_size``/``embedder_projection_dim`` are never bound,
        so this method raises NameError as written.
        """
        a : Dict = 32
        a : str = embedder_hidden_size
        # image encoding components
        a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        a : Dict = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
        a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        a : Tuple = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        a : Union[str, Any] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
        torch.manual_seed(0 )
        a : List[Any] = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a : List[str] = AutoencoderKL()
        a : str = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components

    def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ):
        """Create deterministic dummy inputs (prompt / image / generator).

        NOTE(review): the repeated parameter name ``__snake_case`` is a
        SyntaxError; upstream the parameters are presumably
        ``device, seed=0, pil_image=True`` — confirm before fixing.
        """
        if str(__snake_case ).startswith('mps' ):
            a : Tuple = torch.manual_seed(__snake_case )
        else:
            a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
        a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
        if pil_image:
            a : Optional[Any] = input_image * 0.5 + 0.5
            a : Optional[Any] = input_image.clamp(0 , 1 )
            a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def lowercase_ ( self : Optional[Any] ):
        """Smoke-test a full img2img pass on CPU and pin a 3x3 output slice."""
        a : List[Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        a : Union[str, Any] = self.get_dummy_components()
        a : Any = StableUnCLIPImgaImgPipeline(**__snake_case )
        a : Tuple = sd_pipe.to(__snake_case )
        sd_pipe.set_progress_bar_config(disable=__snake_case )
        a : Union[str, Any] = self.get_dummy_inputs(__snake_case )
        inputs.update({'image_embeds': None} )
        a : str = sd_pipe(**__snake_case ).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def lowercase_ ( self : List[str] ):
        """Attention slicing must match the unsliced forward pass (looser diff check on cpu/mps)."""
        a : int = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )

    def lowercase_ ( self : int ):
        """Single-item batched inference must match unbatched inference (looser diff check on cpu/mps)."""
        a : Optional[int] = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=__snake_case )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def lowercase_ ( self : Dict ):
        """xFormers attention must produce the same outputs as the default attention."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
    """Slow GPU integration tests for Stable unCLIP img2img against hosted fixtures.

    NOTE(review): every result below is bound to the throwaway local ``a`` but
    later read under its original name (``pipe``, ``image`` …), so these tests
    raise NameError as written.
    """

    def lowercase_ ( self : Any ):
        """Free VRAM between tests (despite the generated name, this is a tearDown)."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self : Optional[Any] ):
        """Run the 2-1-l img2img checkpoint and compare against a reference image."""
        a : Optional[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        a : Union[str, Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
        a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
        a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
        a : List[str] = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(__snake_case , __snake_case )

    def lowercase_ ( self : Optional[int] ):
        """Run the 2-1-h img2img checkpoint and compare against a reference image."""
        a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        a : Any = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
        a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
        a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
        a : List[str] = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(__snake_case , __snake_case )

    def lowercase_ ( self : Any ):
        """Check peak GPU memory stays under 7 GB with slicing + cpu offload enabled."""
        a : Optional[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
        a : Optional[Any] = pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = pipe(
            __snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 297
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for an M2M100-style model call.

    Args:
        config: model config providing ``pad_token_id`` and the
            encoder/decoder layer and attention-head counts.
        input_ids: encoder input token ids (LongTensor).
        decoder_input_ids: decoder input token ids (LongTensor).
        attention_mask / decoder_attention_mask: optional padding masks;
            derived from the pad token when omitted.
        head_mask / decoder_head_mask / cross_attn_head_mask: optional
            per-layer head masks; all-ones tensors when omitted.

    Returns:
        A dict of keyword arguments suitable for ``model(**inputs_dict)``.
    """
    # Fixes vs. the generated original: results were assigned to throwaway
    # locals, the masks were created on the undefined device `_A`, and the
    # "decoder_attention_mask" entry wrongly reused the encoder mask.
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=input_ids.device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Backward-compatible alias for the original generated name.
__lowerCamelCase = prepare_mam_aaa_inputs_dict
class SCREAMING_SNAKE_CASE :
    """Builds tiny M2M100 configs/inputs and runs model consistency checks.

    NOTE(review): presumably the original ``MaMaaaModelTester`` — the test
    suite below instantiates ``MaMaaaModelTester``, which is undefined here.
    """

    def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=1_3 , UpperCamelCase__ : int=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : Tuple=1_6 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple="relu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : List[str]=2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : Optional[Any]=0 , ):
        """Store test hyper-parameters.

        NOTE(review): the repeated parameter name ``UpperCamelCase__`` is a
        SyntaxError, and the body assigns to a local instead of ``self`` —
        the original presumably read ``self.parent = parent`` etc.
        """
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = encoder_layerdrop
        UpperCamelCase = decoder_layerdrop
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = eos_token_id
        UpperCamelCase = pad_token_id
        UpperCamelCase = bos_token_id

    def A ( self : Dict ):
        """Create a config plus a dict of model inputs (pad-free token ids)."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = self.eos_token_id  # Eos Token
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        UpperCamelCase = input_ids.clamp(self.pad_token_id + 1 )
        UpperCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        UpperCamelCase = self.get_config()
        UpperCamelCase = prepare_mam_aaa_inputs_dict(__snake_case , __snake_case , __snake_case )
        return config, inputs_dict

    def A ( self : Union[str, Any] ):
        """Build a tiny MaMaaaConfig from the stored hyper-parameters."""
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    def A ( self : Optional[Any] ):
        """Alias used by the common test mixins (prepare_config_and_inputs_for_common)."""
        UpperCamelCase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
        """Verify decoder outputs with cached past_key_values match the uncached run."""
        UpperCamelCase = MaMaaaModel(config=__snake_case ).get_decoder().to(__snake_case ).eval()
        UpperCamelCase = inputs_dict['input_ids']
        UpperCamelCase = inputs_dict['attention_mask']
        UpperCamelCase = inputs_dict['head_mask']
        # first forward pass
        UpperCamelCase = model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case )
        UpperCamelCase = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCamelCase = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        UpperCamelCase = model(__snake_case , attention_mask=__snake_case )['last_hidden_state']
        UpperCamelCase = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[
            'last_hidden_state'
        ]
        # select random slice
        UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCamelCase = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1E-2 ) )

    def A ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] ):
        """Round-trip encoder and decoder through save_pretrained/from_pretrained."""
        UpperCamelCase = MaMaaaModel(config=__snake_case ).to(__snake_case ).eval()
        UpperCamelCase = model(**__snake_case )
        UpperCamelCase = outputs.encoder_last_hidden_state
        UpperCamelCase = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = model.get_encoder()
            encoder.save_pretrained(__snake_case )
            UpperCamelCase = MaMaaaEncoder.from_pretrained(__snake_case ).to(__snake_case )
        UpperCamelCase = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase = model.get_decoder()
            decoder.save_pretrained(__snake_case )
            UpperCamelCase = MaMaaaDecoder.from_pretrained(__snake_case ).to(__snake_case )
        UpperCamelCase = decoder(
            input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=__snake_case , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common ModelTester/Generation/Pipeline test suite for M2M100 models.

    NOTE(review): the base names ``lowerCamelCase__`` are undefined here —
    presumably ModelTesterMixin, GenerationTesterMixin and PipelineTesterMixin
    imported above; and all ``_SCREAMING_SNAKE_CASE`` attributes share one
    name, so only the last assignment survives.
    """

    _SCREAMING_SNAKE_CASE = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    _SCREAMING_SNAKE_CASE = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    _SCREAMING_SNAKE_CASE = (
        {
            """conversational""": MaMaaaForConditionalGeneration,
            """feature-extraction""": MaMaaaModel,
            """summarization""": MaMaaaForConditionalGeneration,
            """text2text-generation""": MaMaaaForConditionalGeneration,
            """translation""": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    _SCREAMING_SNAKE_CASE = True
    _SCREAMING_SNAKE_CASE = True
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False

    def A ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ):
        """Skip pipeline test cases that cannot run for this model.

        NOTE(review): repeated ``UpperCamelCase__`` parameter names are a
        SyntaxError; the body reads ``pipeline_test_casse_name``, which is
        presumably one of the original parameters.
        """
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def A ( self : List[Any] ):
        """Set up the model tester and the config tester (generated name for setUp)."""
        UpperCamelCase = MaMaaaModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=__snake_case )

    def A ( self : Any ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def A ( self : List[str] ):
        """Save and reload each model class; no keys should be missing."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            UpperCamelCase = model_class(__snake_case )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__snake_case )
                UpperCamelCase = model_class.from_pretrained(__snake_case , output_loading_info=__snake_case )
            self.assertEqual(info['missing_keys'] , [] )

    def A ( self : Optional[Any] ):
        """Cached decoder forward must equal the uncached one for large inputs."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__snake_case )

    def A ( self : Optional[Any] ):
        """Standalone encoder/decoder round-trip through save/from_pretrained."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__snake_case )

    def A ( self : Optional[int] ):
        """Forward pass driven by inputs_embeds instead of input_ids."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            UpperCamelCase = model_class(__snake_case )
            model.to(__snake_case )
            model.eval()
            UpperCamelCase = copy.deepcopy(self._prepare_for_class(__snake_case , __snake_case ) )
            if not self.is_encoder_decoder:
                UpperCamelCase = inputs['input_ids']
                del inputs["input_ids"]
            else:
                UpperCamelCase = inputs['input_ids']
                UpperCamelCase = inputs.get('decoder_input_ids' , __snake_case )
                del inputs["input_ids"]
                inputs.pop('decoder_input_ids' , __snake_case )
            UpperCamelCase = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                UpperCamelCase = wte(__snake_case )
            else:
                UpperCamelCase = wte(__snake_case )
                UpperCamelCase = wte(__snake_case )
            with torch.no_grad():
                model(**__snake_case )[0]

    def A ( self : Optional[Any] ):
        """fp16 generation smoke test (half precision only on CUDA)."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        UpperCamelCase = input_dict['input_ids']
        UpperCamelCase = input_ids.ne(1 ).to(__snake_case )
        UpperCamelCase = MaMaaaForConditionalGeneration(__snake_case ).eval().to(__snake_case )
        if torch_device == "cuda":
            model.half()
        model.generate(__snake_case , attention_mask=__snake_case )
        model.generate(num_beams=4 , do_sample=__snake_case , early_stopping=__snake_case , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """Build a ``torch.long`` tensor of token ids on the global test device.

    ``torch_device`` comes from ``transformers.testing_utils`` (imported at the
    top of the file); the original passed the undefined name ``_A`` as the
    ``device`` argument and as the data.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


# Backward-compatible alias for the original generated name.
__lowerCamelCase = _long_tensor

# Absolute tolerance used by the integration tests' allclose checks.
# (The original annotated this as `Any`, which is not imported in this file.)
_lowerCamelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests against the public facebook/m2m100_418M checkpoint."""

    @cached_property
    def A ( self : List[Any] ):
        """Lazily load the default M2M100 tokenizer (generated name for a property)."""
        return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )

    def A ( self : Union[str, Any] ):
        """Pin the base model's hidden states on a fixed input slice."""
        UpperCamelCase = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(__snake_case )
        UpperCamelCase = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        UpperCamelCase = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , __snake_case , __snake_case )
        with torch.no_grad():
            UpperCamelCase = model(**__snake_case )[0]
        UpperCamelCase = torch.Size((1, 1_1, 1_0_2_4) )
        self.assertEqual(output.shape , __snake_case )
        # change to expected output here
        UpperCamelCase = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__snake_case )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=__snake_case ) )

    def A ( self : str ):
        """Pin the conditional-generation head's logits on a fixed input slice."""
        UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__snake_case )
        # change to intended input
        UpperCamelCase = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        UpperCamelCase = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        UpperCamelCase = prepare_mam_aaa_inputs_dict(model.config , __snake_case , __snake_case )
        with torch.no_grad():
            UpperCamelCase = model(**__snake_case )[0]
        UpperCamelCase = torch.Size((1, 1_1, model.config.vocab_size) )
        self.assertEqual(output.shape , __snake_case )
        # change to expected output here
        UpperCamelCase = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__snake_case )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __snake_case , atol=__snake_case ) )

    def A ( self : int ):
        """End-to-end fr->en beam-search translation against reference strings."""
        UpperCamelCase = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__snake_case )
        UpperCamelCase = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
        UpperCamelCase = [
            'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
            'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
            'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
            ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
            ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        UpperCamelCase = tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
        UpperCamelCase = model.generate(
            input_ids=dct['input_ids'].to(__snake_case ) , attention_mask=dct['attention_mask'].to(__snake_case ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
        UpperCamelCase = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]
        UpperCamelCase = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=__snake_case , skip_special_tokens=__snake_case )
        assert generated == expected_en
| 28
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module-level logger. The original bound it to the throwaway name
# `lowerCAmelCase` with a `Union[str, Any]` annotation, but `Union` is not
# imported in this file, so the line raised NameError at import time.
logger = logging.get_logger(__name__)
# Canonical T5 checkpoints and the URLs of their hosted config files.
# The original bound this dict to the throwaway name `lowerCAmelCase`,
# clobbering the logger bound just above and leaving the map unreachable
# under a meaningful name.
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
    't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
    't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
    't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
    't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class a__( PretrainedConfig ):
    """Configuration for T5 models (encoder-decoder text-to-text transformer).

    Restores the behavior of the generated original, whose ``__init__``
    repeated the parameter name ``__snake_case`` (a SyntaxError) and assigned
    every value to a throwaway local instead of ``self``. The base class is
    the ``PretrainedConfig`` imported at the top of the file.
    """

    # PretrainedConfig hooks; the original bound all three to the single
    # throwaway name `lowercase__`, so only the last one survived.
    model_type = """t5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(
        self,
        vocab_size=3_21_28,
        d_model=5_12,
        d_kv=64,
        d_ff=20_48,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=1_28,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Split "gated-gelu" style names into gating flag + activation name.
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class a__( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for T5 (seq2seq, optional past key values).

    The base class is the ``OnnxSeqaSeqConfigWithPast`` imported at the top of
    the file; the generated original inherited from the undefined placeholder
    ``lowerCamelCase__`` and gave both properties the same name.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the exported model's inputs.

        The generated original built the dict into the local ``a`` and then
        returned the undefined name ``common_inputs``; the use_past branch
        likewise assigned its entries to ``a`` instead of the dict.
        """
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With cached keys/values the decoder consumes one new token while
            # the encoder attention mask covers past + current sequence.
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )

        return common_inputs

    @property
    def default_onnx_opset( self ) -> int:
        """Minimum ONNX opset version required for a correct T5 export."""
        return 13
| 297
| 0
|
from math import pi
def arc_length(radius: float, angle: float) -> float:
    """Return the length of a circular arc.

    Args:
        radius: radius of the circle.
        angle: central angle of the arc, in degrees.

    Returns:
        The arc length ``2 * pi * radius * (angle / 360)``.
    """
    # The generated original named both parameters `_A` (a SyntaxError) while
    # the body referenced `radius` and `angle`, and the __main__ block below
    # called the function as `arc_length`.
    return 2 * pi * radius * (angle / 360)


# Backward-compatible alias for the original generated name.
lowercase_ = arc_length


if __name__ == "__main__":
    print(arc_length(90, 10))
| 184
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase__ ( inductance: float , capacitance: float ) -> tuple[str, float]:
    """Compute the resonant frequency of an ideal LC circuit.

    Args:
        inductance: inductance L in henries; must be > 0.
        capacitance: capacitance C in farads; must be > 0.

    Returns:
        A ``("Resonant frequency", value)`` tuple where
        ``value = 1 / (2 * pi * sqrt(L * C))`` in hertz.

    Raises:
        ValueError: if either argument is zero or negative.
    """
    # The generated original named both parameters `_A` (a SyntaxError) while
    # the body referenced `inductance` and `capacitance`.
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative' )
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative' )
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 297
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
# Module logger. The original annotated this as `Optional[Any]`, neither of
# which is imported in this file, so the line raised NameError at import time.
lowerCamelCase__ = logging.get_logger(__name__)


class lowerCamelCase_ ( BeitImageProcessor ):
    """Deprecated alias for ``BeitImageProcessor``, kept for backward compatibility.

    Instantiating it emits a ``FutureWarning`` pointing users to
    ``BeitImageProcessor`` (imported above); all behavior is inherited
    unchanged. The generated original inherited from the module logger and
    repeated the parameter name ``*_lowerCAmelCase``/``**_lowerCAmelCase``
    (a SyntaxError).
    """

    def __init__( self , *args , **kwargs ):
        # The second positional argument of warnings.warn is the warning
        # category; the original passed the undefined placeholder
        # `__snake_case` here.
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 225
|
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger. The original annotated the throwaway name
# `lowerCAmelCase` as `Any`, which is not imported in this file, so the line
# raised NameError at import time.
logger = logging.get_logger(__name__)
# Tokenizer resource maps. The generated original bound all three dicts to
# the single throwaway name `lowerCAmelCase`, while the tokenizer class below
# reads them as VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — which were therefore undefined.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}

# Maximum model input sizes (positional-embedding capacity) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1_0_2_4,
}
def load_vocab(vocab_file):
    """Load a newline-separated vocabulary file into an ordered token->id map.

    Args:
        vocab_file: path to a text file with one token per line.

    Returns:
        ``collections.OrderedDict`` mapping each token (trailing newline
        stripped) to its 0-based line index.
    """
    # Fixes vs. the generated original: it enumerated the *path* instead of
    # the lines read from the file, and never stored anything in the dict
    # (every assignment targeted the throwaway local `a`). The tokenizer
    # class below calls this helper as `load_vocab`.
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        vocab[token.rstrip('\n')] = index
    return vocab


# Backward-compatible alias for the original generated name.
lowerCamelCase__ = load_vocab
class a__:
    """Greedy longest-match-first sub-token splitter over a fixed vocabulary.

    The generated original inherited from the undefined placeholder
    ``lowerCamelCase__`` (upstream this helper is a plain object), repeated
    the parameter name ``__snake_case`` in ``__init__`` (a SyntaxError) and
    assigned its arguments to a local instead of ``self``.
    """

    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        """Store the vocabulary, unknown-token symbol and per-word length cap."""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize( self , token ):
        """Split ``token`` into the longest matching vocabulary entries, left to right.

        Returns ``[self.unk_token]`` when the token exceeds
        ``max_input_chars_per_word``. When no substring starting at the
        current position matches, emits ``self.unk_token`` for one character
        and resumes scanning after it.
        """
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            # shrink the window from the right until a vocab entry matches
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens

    # the original generated method name, kept for backward compatibility
    lowercase_ = tokenize


# The tokenizer class below instantiates `WordpieceTokenizer`, but the code
# generator renamed this class to `a__`; restore the expected binding.
WordpieceTokenizer = a__
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = False
def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
a : Union[str, Any] = bod_token
a : Any = eod_token
a : List[str] = load_vocab(__snake_case )
a : Optional[int] = self.encoder[space_token]
a : str = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
a : Tuple = {v: k for k, v in self.encoder.items()}
a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowercase_ ( self : Optional[int] ):
return self.encoder[self.bod_token]
@property
def lowercase_ ( self : Dict ):
return self.encoder[self.eod_token]
@property
def lowercase_ ( self : Any ):
return self.encoder["\n"]
@property
def lowercase_ ( self : Tuple ):
return len(self.encoder )
def lowercase_ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ):
a : List[str] = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ):
a : Optional[int] = [i for i in token_ids if i >= 0]
a : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : int ):
return token in self.encoder
def lowercase_ ( self : int , __snake_case : List[str] ):
return "".join(__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ):
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
return self.decoder.get(__snake_case , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , filename_prefix : Optional[str] = None ):
    """Write the vocabulary to ``__snake_case`` (a directory or a file path),
    one token per line, line number == token id. Returns a 1-tuple with the
    written path.

    Fixes in this revision:
    - both parameters were named ``__snake_case`` (duplicate argument — a
      SyntaxError); the second is now ``filename_prefix``;
    - ``vocab_file``, ``index`` and the sort-key lambda referenced names the
      obfuscation had destroyed; they are bound explicitly again.
    """
    if os.path.isdir(__snake_case ):
        vocab_file = os.path.join(
            __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    else:
        vocab_file = (filename_prefix + '-' if filename_prefix else '') + __snake_case
    index = 0
    # Drop the literal space / newline entries before dumping.
    # NOTE(review): the original (obfuscated) code read these ids into
    # throwaway locals and never re-inserted them — confirm against the
    # upstream tokenizer whether they should be re-keyed instead of dropped.
    if " " in self.encoder:
        space_id = self.encoder[' ']
        del self.encoder[" "]
    if "\n" in self.encoder:
        line_id = self.encoder['\n']
        del self.encoder["\n"]
    # Re-sort by id so the file's line order matches token ids.
    self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda item : item[1] ) )
    with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
        for token, token_index in self.encoder.items():
            if index != token_index:
                logger.warning(
                    F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                    ' Please check that the vocabulary is not corrupted!' )
                index = token_index
            writer.write(token + '\n' )
            index += 1
    return (vocab_file,)
def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 297
| 0
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowercase ( lowerCamelCase__ ):
    """Audio-diffusion pipeline: denoises mel-spectrogram images with a UNet and
    converts them back to audio; optionally runs in VQ-VAE latent space.

    NOTE(review): this chunk was machine-obfuscated — every parameter is named
    ``A__`` (duplicate-argument SyntaxErrors) and results are bound to the
    throwaway name ``lowercase`` while later lines read the original variable
    names (``steps``, ``noise``, ``mask`` ...). Comments below describe the
    intent visible in the surviving call structure; confirm against upstream.
    """

    # Modules that may legitimately be absent when the pipeline is assembled.
    lowercase_ : Tuple =['''vqvae''']

    def __init__( self ,A__ ,A__ ,A__ ,A__ ,):
        # Registers unet, scheduler, mel converter and the (optional) vqvae.
        super().__init__()
        self.register_modules(unet=__snake_case ,scheduler=__snake_case ,mel=__snake_case ,vqvae=__snake_case)

    def A__ ( self):
        # Default inference steps: 50 for DDIM-style schedulers, 1000 otherwise.
        return 5_0 if isinstance(self.scheduler ,__snake_case) else 1_0_0_0

    @torch.no_grad()
    def __call__( self ,A__ = 1 ,A__ = None ,A__ = None ,A__ = 0 ,A__ = 0 ,A__ = None ,A__ = None ,A__ = 0 ,A__ = 0 ,A__ = None ,A__ = 0 ,A__ = None ,A__ = None ,A__=True ,):
        """Run the denoising loop and decode the result to images and audio."""
        lowercase = steps or self.get_default_steps()
        self.scheduler.set_timesteps(__snake_case)
        lowercase = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            lowercase = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) ,generator=__snake_case ,device=self.device ,)
        lowercase = noise
        lowercase = None
        # Seeding from existing audio: build the starting image/latents and mask.
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(__snake_case ,__snake_case)
            lowercase = self.mel.audio_slice_to_image(__snake_case)
            lowercase = np.frombuffer(input_image.tobytes() ,dtype='''uint8''').reshape(
                (input_image.height, input_image.width))
            # Rescale 8-bit pixels to [-1, 1] for the diffusion model.
            lowercase = (input_image / 2_5_5) * 2 - 1
            lowercase = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                # Encode into latent space when a VQ-VAE is attached.
                lowercase = self.vqvae.encode(torch.unsqueeze(__snake_case ,0)).latent_dist.sample(
                    generator=__snake_case)[0]
                lowercase = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                lowercase = self.scheduler.add_noise(__snake_case ,__snake_case ,self.scheduler.timesteps[start_step - 1])
            # Width of one second of audio, measured in spectrogram pixels.
            lowercase = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            lowercase = int(mask_start_secs * pixels_per_second)
            lowercase = int(mask_end_secs * pixels_per_second)
            lowercase = self.scheduler.add_noise(__snake_case ,__snake_case ,torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            # Conditional UNets take an extra conditioning argument.
            if isinstance(self.unet ,__snake_case):
                lowercase = self.unet(__snake_case ,__snake_case ,__snake_case)['sample']
            else:
                lowercase = self.unet(__snake_case ,__snake_case)['sample']
            # DDIM-style schedulers accept eta; others do not.
            if isinstance(self.scheduler ,__snake_case):
                lowercase = self.scheduler.step(
                    model_output=__snake_case ,timestep=__snake_case ,sample=__snake_case ,eta=__snake_case ,generator=__snake_case ,)['prev_sample']
            else:
                lowercase = self.scheduler.step(
                    model_output=__snake_case ,timestep=__snake_case ,sample=__snake_case ,generator=__snake_case ,)['prev_sample']
            # Re-impose the unmasked regions of the seed audio every step.
            if mask is not None:
                if mask_start > 0:
                    lowercase = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    lowercase = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            lowercase = 1 / self.vqvae.config.scaling_factor * images
            lowercase = self.vqvae.decode(__snake_case)['sample']
        # Denormalize [-1, 1] model output back to 8-bit images.
        lowercase = (images / 2 + 0.5).clamp(0 ,1)
        lowercase = images.cpu().permute(0 ,2 ,3 ,1).numpy()
        lowercase = (images * 2_5_5).round().astype('''uint8''')
        lowercase = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(__snake_case ,mode='''RGB''').convert('''L''') for _ in images))
        lowercase = [self.mel.image_to_audio(__snake_case) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(__snake_case)[:, np.newaxis, :]) ,**ImagePipelineOutput(__snake_case))

    @torch.no_grad()
    def A__ ( self ,A__ ,A__ = 5_0):
        """Reverse DDIM sampling: encode images back to noise (DDIM schedulers only)."""
        assert isinstance(self.scheduler ,__snake_case)
        self.scheduler.set_timesteps(__snake_case)
        lowercase = np.array(
            [np.frombuffer(image.tobytes() ,dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
        lowercase = (sample / 2_5_5) * 2 - 1
        lowercase = torch.Tensor(__snake_case).to(self.device)
        # Walk timesteps in reverse order to invert the sampling process.
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,))):
            lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            lowercase = self.scheduler.alphas_cumprod[t]
            lowercase = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            lowercase = 1 - alpha_prod_t
            lowercase = self.unet(__snake_case ,__snake_case)['sample']
            lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def A__ ( A__ ,A__ ,A__):
        """Spherical linear interpolation between two flattened tensors."""
        lowercase = acos(torch.dot(torch.flatten(__snake_case) ,torch.flatten(__snake_case)) / torch.norm(__snake_case) / torch.norm(__snake_case))
        return sin((1 - alpha) * theta) * xa / sin(__snake_case) + sin(alpha * theta) * xa / sin(__snake_case)
| 101
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__( unittest.TestCase ):
    """Round-trip tests that checkpoints load across frameworks (PT <-> TF).

    NOTE(review): obfuscation replaced most call arguments with the
    placeholder ``__snake_case`` (the ``model_name`` loop variable is never
    used) and produced invalid annotated tuple assignments
    (``a , a : Any = ...``). Each test's intent is: load a checkpoint with
    ``from_pt``/``from_tf`` and assert the result is the expected model class.
    """

    @slow
    def lowercase_ ( self : List[Any] ):
        # Base AutoModel cross-loading.
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : str ):
        # Pre-training heads cross-loading.
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            a : List[str] = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : int ):
        # Causal-LM cross-loading (also checks the output_loading_info variant).
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case )
            a , a : Any = TFAutoModelForCausalLM.from_pretrained(
                __snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case )
            a , a : Tuple = AutoModelForCausalLM.from_pretrained(
                __snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : Any ):
        # Generic LM-head cross-loading.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Tuple = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : Optional[int] ):
        # Masked-LM cross-loading (with output_loading_info variant).
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : List[str] = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case )
            a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
                __snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case )
            a , a : Tuple = AutoModelForMaskedLM.from_pretrained(
                __snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : int ):
        # Seq2seq LM cross-loading (with output_loading_info variant).
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case )
            a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
                __snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case )
            a , a : str = AutoModelForSeqaSeqLM.from_pretrained(
                __snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : Optional[Any] ):
        # Sequence-classification head cross-loading.
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            a : Tuple = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    @slow
    def lowercase_ ( self : str ):
        # Question-answering head cross-loading.
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )
            a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case )
            self.assertIsNotNone(__snake_case )
            self.assertIsInstance(__snake_case , __snake_case )

    def lowercase_ ( self : Tuple ):
        # Loading a tiny model by identifier and checking its parameter count.
        a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
        self.assertIsInstance(__snake_case , __snake_case )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
        a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
        self.assertIsInstance(__snake_case , __snake_case )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )

    def lowercase_ ( self : Any ):
        # Same check with a different (dummy) identifier.
        a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
        self.assertIsInstance(__snake_case , __snake_case )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
        a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
        self.assertIsInstance(__snake_case , __snake_case )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
| 297
| 0
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
    """Golden-output tests for the Jukebox tokenizer (1b- and 5b-lyrics variants).

    NOTE(review): obfuscation bound both class attributes to the same name
    ``UpperCAmelCase_`` (the metadata dict shadows the tokenizer-class
    attribute) and collapsed locals onto ``UpperCamelCase`` while later lines
    read ``tokenizer`` / ``EXPECTED_OUTPUT`` — confirm against upstream.
    """

    UpperCAmelCase_ = JukeboxTokenizer
    UpperCAmelCase_ = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
    }

    @require_torch
    def snake_case_ (self ) -> Dict:
        # 1b-lyrics checkpoint: tokenized ids must match the frozen fixture.
        import torch

        UpperCamelCase = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        UpperCamelCase = tokenizer(**self.metas )['input_ids']
        # fmt: off
        UpperCamelCase = [
            torch.tensor([[
                0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )

    @require_torch
    def snake_case_ (self ) -> Tuple:
        # 5b-lyrics checkpoint: same check against a different frozen fixture.
        import torch

        UpperCamelCase = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        UpperCamelCase = tokenizer(**self.metas )['input_ids']
        # fmt: off
        UpperCamelCase = [
            torch.tensor([[
                0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 153
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for the stock RoBERTa checkpoints.
# NOTE(review): this assignment rebinds the same name ``lowerCAmelCase`` and
# therefore shadows the logger above — confirm the intended distinct names.
lowerCAmelCase: List[Any] = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
    """RoBERTa model configuration.

    Fixes in this revision:
    - every constructor parameter shared the placeholder name
      ``__snake_case`` (duplicate arguments — a SyntaxError); parameter names
      are restored from their default values and assignment order;
    - each configuration value was written to a throwaway local ``a`` instead
      of being stored on ``self``; the instance attributes are restored.
    """

    lowercase__ = """roberta"""

    def __init__(
        self : Tuple ,
        vocab_size : int = 5_02_65 ,
        hidden_size : int = 7_68 ,
        num_hidden_layers : int = 12 ,
        num_attention_heads : int = 12 ,
        intermediate_size : int = 30_72 ,
        hidden_act : str = "gelu" ,
        hidden_dropout_prob : float = 0.1 ,
        attention_probs_dropout_prob : float = 0.1 ,
        max_position_embeddings : int = 5_12 ,
        type_vocab_size : int = 2 ,
        initializer_range : float = 0.02 ,
        layer_norm_eps : float = 1e-1_2 ,
        pad_token_id : int = 1 ,
        bos_token_id : int = 0 ,
        eos_token_id : int = 2 ,
        position_embedding_type : str = "absolute" ,
        use_cache : bool = True ,
        classifier_dropout = None ,
        **kwargs : str ,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a__( lowerCamelCase__ ):
    @property
    def lowercase_ ( self : int ):
        """Dynamic-axis spec for ONNX export: input_ids and attention_mask
        share the same axes, with an extra 'choice' axis for multiple-choice."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            (input_name, dynamic_axis) for input_name in ('input_ids', 'attention_mask') )
| 297
| 0
|
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return 10 - x * x
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : Any ):
# Bolzano theory in order to find if there is a root between a and b
if equation(_A ) * equation(_A ) >= 0:
raise ValueError('Wrong space!' )
lowerCAmelCase = a
while (b - a) >= 0.01:
# Find middle point
lowerCAmelCase = (a + b) / 2
# Check if middle point is root
if equation(_A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_A ) * equation(_A ) < 0:
lowerCAmelCase = c
else:
lowerCAmelCase = c
return c
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the script called the undefined name ``bisection``. Both demo
    # functions were bound to ``a_``; at this point ``a_`` is the bisection
    # routine (the later definition wins), so call it directly.
    print(a_(-2, 5))
    print(a_(0, 6))
| 4
|
'''simple docstring'''
def lowerCamelCase__ ( _A ):
    """Return f(x) = 10 - x**2, the function whose root the bisection demo finds.

    Fix: the body referenced an undefined name ``x`` instead of the parameter.
    """
    return 10 - _A * _A
def lowerCamelCase__ ( _A , _B , equation=None ):
    """Find a root of ``equation`` inside [_A, _B] by bisection, to an
    interval tolerance of 0.01.

    Fixes in this revision:
    - both parameters were named ``_A`` (duplicate argument — a SyntaxError);
    - the body called ``equation``, which does not exist at module level (the
      sibling function was renamed to ``lowerCamelCase__`` and is shadowed by
      this definition); ``equation`` is now an optional parameter defaulting
      to the module's f(x) = 10 - x*x, keeping 2-argument callers working;
    - both branches of the interval-halving step wrote the same variable.

    Raises:
        ValueError: if f(a) and f(b) do not bracket a sign change (Bolzano).
    """
    if equation is None:
        equation = lambda value: 10 - value * value  # default matches the demo f(x)
    # Bolzano theory in order to find if there is a root between a and b
    if equation(_A ) * equation(_B ) >= 0:
        raise ValueError('Wrong space!' )
    a = _A
    b = _B
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the script called the undefined name ``bisection``. Both demo
    # functions were bound to ``lowerCamelCase__``; at this point that name is
    # the bisection routine (the later definition wins), so call it directly.
    print(lowerCamelCase__(-2, 5))
    print(lowerCamelCase__(0, 6))
| 297
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Any = b.T
__lowerCamelCase : Any = np.sum(np.square(_A ) , axis=1 )
__lowerCamelCase : Tuple = np.sum(np.square(_A ) , axis=0 )
__lowerCamelCase : Tuple = np.matmul(_A , _A )
__lowerCamelCase : str = aa[:, None] - 2 * ab + ba[None, :]
return d
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : Dict = x.reshape(-1 , 3 )
__lowerCamelCase : int = squared_euclidean_distance(_A , _A )
return np.argmin(_A , axis=1 )
class A_ ( lowerCamelCase__ ):
    """Image processor that resizes images, rescales pixel values to [-1, 1],
    and optionally color-quantizes each pixel to the nearest palette cluster,
    producing ``input_ids`` (ImageGPT-style).

    NOTE(review): the original block had every ``__init__``/method parameter
    bound to a single duplicated name (a SyntaxError) and referenced the
    undefined name-mangled ``__snake_case``; parameters and method names have
    been restored from their uses (``preprocess`` calls ``self.resize`` and
    ``self.normalize``).
    """

    # Restored from the base image-processor contract; the original attribute
    # name was mangled. Value kept byte-for-byte.
    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        """
        Args:
            clusters: color palette of shape ``(n_clusters, 3)`` used for
                quantization; ``None`` disables quantization until provided.
            do_resize: whether to resize inputs to ``size`` by default.
            size: target size; defaults to 256x256.
            resample: PIL resampling filter used when resizing.
            do_normalize: whether to rescale pixel values to [-1, 1].
            do_color_quantize: whether to map pixels to nearest cluster ids.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 2_5_6, 'width': 2_5_6}
        size = get_size_dict(size)
        # Store the palette as an array (or None when quantization is unused).
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``size`` (must contain 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Rescale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images.

        Each argument falls back to the instance default when ``None``.
        Returns a ``BatchFeature`` whose ``input_ids`` are flattened cluster
        ids when quantizing, else channel-formatted pixel arrays.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # Parenthesized: the original un-grouped and/or made ``resample is None``
        # raise even when do_resize was False.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            clusters = np.array(clusters)  # converted here so the None-check above stays meaningful
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 73
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a__:
    """Builds tiny EsmConfig / EsmForProteinFolding fixtures for the tests below.

    NOTE(review): the original bound every local to the single name ``a`` so the
    ``return`` statements referenced undefined names, and ``__init__`` never
    stored anything on ``self``.  Locals and method names have been restored to
    match the callers visible in this file
    (``prepare_config_and_inputs`` / ``create_and_check_model``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a tiny folding-mode EsmConfig with a 2-block trunk."""
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            # NOTE(review): this flag's value was lost to mangling; a folding
            # test requires a folding model — confirm.
            is_folding_model=True,
            esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the folding model and check output position/angle shapes."""
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )

    def prepare_config_and_inputs_for_common(self):
        """Common-test entry point: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common-test harness for EsmForProteinFolding.

    NOTE(review): most method names were lost to mangling (every method was
    ``lowercase_``); only ``setUp`` and the two runnable tests have restored
    names — the skip-stubs below are behavior-neutral ``pass`` bodies and keep
    their mangled names.  Class attributes are restored to the
    ModelTesterMixin contract (the originals all shadowed one identifier).
    """

    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        # NOTE(review): ``EsmFoldModelTester`` is not defined in this file (the
        # tester class above lost its name to mangling) — confirm the intended
        # reference before running.
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @unittest.skip('Does not support attention outputs' )
    def lowercase_( self : str ):
        pass

    @unittest.skip
    def lowercase_( self : Optional[int] ):
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def lowercase_( self : Optional[int] ):
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def lowercase_( self : Any ):
        pass

    @unittest.skip('ESMFold does not support passing input embeds!' )
    def lowercase_( self : Any ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_( self : Union[str, Any] ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_( self : List[Any] ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_( self : List[Any] ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_( self : int ):
        pass

    @unittest.skip('ESMFold does not support head pruning.' )
    def lowercase_( self : List[Any] ):
        pass

    @unittest.skip('ESMFold does not output hidden states in the normal way.' )
    def lowercase_( self : int ):
        pass

    @unittest.skip('ESMfold does not output hidden states in the normal way.' )
    def lowercase_( self : int ):
        pass

    @unittest.skip('ESMFold only has one output format.' )
    def lowercase_( self : Dict ):
        pass

    @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
    def lowercase_( self : Tuple ):
        pass

    @unittest.skip('ESMFold does not support input chunking.' )
    def lowercase_( self : List[str] ):
        pass

    @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
    def lowercase_( self : List[Any] ):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
    def lowercase_( self : Union[str, Any] ):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
    def lowercase_( self : Any ):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
    def lowercase_( self : List[str] ):
        pass

    @unittest.skip('ESMFold doesn\'t support data parallel.' )
    def lowercase_( self : Dict ):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowercase_( self : Union[str, Any] ):
        pass
@require_torch
class a__( lowerCamelCase__ ):
    """Slow integration test for the pretrained ESMFold v1 checkpoint."""

    @slow
    def test_inference_protein_folding(self):
        # NOTE(review): the original method name was mangled; restored to a
        # ``test_``-prefixed name so unittest actually discovers it.
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['positions']
        # ``torch.floataa`` in the original is not a dtype; float64 assumed — confirm.
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float64 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
| 297
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase__ :
    '''Builds small BeitConfig fixtures and inputs for the BEiT model tests.

    NOTE(review): the original had duplicated parameter names (a SyntaxError),
    bound every local to one name, and never stored attributes on ``self``.
    Parameter, attribute, and method names are restored to match the callers
    visible below (``create_and_check_model`` etc.).
    '''

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=None,  # sentinel instead of a shared mutable default
    ):
        self.parent = parent
        self.vocab_size = 100  # fixed small vocab (the original hard-coded 100 here too)
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices if out_indices is not None else [0, 1, 2, 3]
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        '''Return (config, pixel_values, labels, pixel_labels).'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        '''Build a tiny BeitConfig from the tester's hyperparameters.'''
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            # NOTE(review): flag value lost to mangling; BEiT is an encoder — confirm.
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # MIM head predicts all patch tokens (seq_length - 1 excludes [CLS]).
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values ,labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )

    def prepare_config_and_inputs_for_common(self):
        '''Common-test entry point: (config, inputs_dict).'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    '''Common model tests for BEiT (base model, MIM, classification, segmentation).

    NOTE(review): the original shadowed one class-attribute name five times and
    bound every local to one name; attribute and method names are restored to
    the ModelTesterMixin / unittest conventions so the tests actually run.
    '''

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        # NOTE(review): ``BeitModelTester`` is not defined in this file (the
        # tester class above lost its name to mangling) — confirm before running.
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=BeitConfig ,has_text_modality=False ,hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""BEiT does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''Load the standard COCO fixture image used by the integration tests below.

    Renamed from the mangled ``__lowercase``: every caller in this file invokes
    ``prepare_img()``.  The original also returned the undefined name ``image``.
    '''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
    '''Slow integration tests that run pretrained BEiT checkpoints end to end.

    NOTE(review): locals in the original were all bound to one name and the
    forward/verification lines referenced undefined identifiers; locals and
    method names have been restored (``default_image_processor`` is grounded
    by its reads in the test bodies).
    '''

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image ,return_tensors="""pt""" ).pixel_values.to(torch_device )
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196) ,dtype=torch.bool ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values ,bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192) )
        self.assertEqual(logits.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,expected_slice ,atol=1e-2 ) )

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(logits.shape ,expected_shape )
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] ,expected_slice ,atol=1e-4 ) )
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1 ).item() ,expected_class_idx )

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841) )
        self.assertEqual(logits.shape ,expected_shape )
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901] ).to(torch_device )
        self.assertTrue(torch.allclose(logits[0, :3] ,expected_slice ,atol=1e-4 ) )
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1 ).item() ,expected_class_idx )

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
        model = model.to(torch_device )
        # NOTE(review): the resize/center-crop flag values were lost to
        # mangling; True/False assumed — confirm against the checkpoint card.
        image_processor = BeitImageProcessor(do_resize=True ,size=640 ,do_center_crop=False )
        ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" ,split="""test""" )
        image = Image.open(ds[0]["""file"""] )
        inputs = image_processor(images=image ,return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160) )
        self.assertEqual(logits.shape ,expected_shape )
        is_pillow_less_than_9 = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ] ,device=torch_device ,)
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ] ,device=torch_device ,)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,expected_slice ,atol=1e-4 ) )

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True ,size=640 ,do_center_crop=False )
        ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" ,split="""test""" )
        image = Image.open(ds[0]["""file"""] )
        inputs = image_processor(images=image ,return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs ,target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape ,expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape ,expected_shape )
| 296
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a__( nn.Module ):
    """Pair of TransformeraDModel blocks whose outputs are mixed for inference.

    Each transformer encodes one slice of ``encoder_hidden_states`` (split by
    ``condition_lengths``); the residual outputs are blended by ``mix_ratio``.

    NOTE(review): the original had duplicated parameter names (a SyntaxError)
    and bound the module list / mix settings to locals instead of ``self``;
    names restored from their reads in the forward pass.  The forward method's
    mangled name is restored to ``forward`` so the nn.Module is callable.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode both condition slices, blend the residuals, add back the input."""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
| 297
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( lowerCamelCase__, unittest.TestCase ):
    """BPE tokenizer tests for CTRL.

    NOTE(review): the original shadowed one class-attribute name three times,
    bound locals that were then read as undefined mangled names, and never set
    ``self.vocab_file``/``self.merges_file``/``self.special_tokens_map`` that
    the test body reads; names restored accordingly.
    """

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs )

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
| 255
|
"""Lazy import structure for the SpeechT5 model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

# Fix: the mangled original assigned this dict (and the optional lists below)
# to throwaway names, so `_import_structure` used at the bottom was undefined.
_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    # Module/class names restored to match the keys declared in _import_structure.
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 297
| 0
|
"""Lazy import structure for the BLIP model family (PyTorch + TensorFlow)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Fix: the mangled original assigned this dict (and the backend lists below)
# to throwaway names, leaving `_import_structure` undefined at the bottom.
_import_structure = {
    'configuration_blip': [
        'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlipConfig',
        'BlipTextConfig',
        'BlipVisionConfig',
    ],
    'processing_blip': ['BlipProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_blip'] = ['BlipImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip'] = [
        'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlipModel',
        'BlipPreTrainedModel',
        'BlipForConditionalGeneration',
        'BlipForQuestionAnswering',
        'BlipVisionModel',
        'BlipTextModel',
        'BlipForImageTextRetrieval',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_blip'] = [
        'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFBlipModel',
        'TFBlipPreTrainedModel',
        'TFBlipForConditionalGeneration',
        'TFBlipForQuestionAnswering',
        'TFBlipVisionModel',
        'TFBlipTextModel',
        'TFBlipForImageTextRetrieval',
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 210
|
"""Lazy import structure for the MGP-STR scene-text-recognition model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Fix: the mangled original assigned this dict and the torch list to
# throwaway names, so `_import_structure` was never defined.
_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 297
| 0
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# (stable-diffusion key, HF Diffusers key) pairs for the UNet top-level params.
# Fix: the mangled original assigned every map/prefix to throwaway names while
# the append() calls below referenced the real names; names restored to match.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]

# Substring replacements applied only inside resnet keys.
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to original stable-diffusion layout.

    Fix: the mangled original lost every assignment target (`mapping[...]`,
    `v = ...`) and replaced loop variables with `_A`; restored from the
    surrounding loop headers. Function name restored from the call at the
    bottom of this script (`convert_unet_state_dict(unet_state_dict)`).
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
# (stable-diffusion, HF Diffusers) substring pairs for VAE keys.
# Fix: assignment targets were mangled to `__A` while the append() calls
# referenced the real prefix names; names restored to match.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

# Attention-layer renames applied only when "attentions" appears in the key.
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    """Convert an HF linear weight (out, in) to an SD conv2d weight (out, in, 1, 1).

    Fix: the mangled original named the parameter `__snake_case` while the body
    used `w`; the function name is restored from the call site in
    convert_vae_state_dict.
    """
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to original stable-diffusion layout.

    Also reshapes the mid-block attention q/k/v/proj_out linear weights into
    1x1 conv weights expected by the SD checkpoint format.

    Fix: lost assignment targets and `_A` loop placeholders restored from the
    surrounding loop headers; name restored from the call at script bottom.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'''mid.attn_1.{weight_name}.weight''' in k:
                print(f'''Reshaping {k} for SD format''')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
# (stable-diffusion, HF Diffusers) renames for the OpenCLIP text encoder.
# Fix: the three assignment targets below were mangled to `__A`; names are
# restored from their uses (`protected.keys()` on the next line and
# `textenc_pattern`/`code2idx` inside the conversion function).
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
# Escaped-HF-name -> SD-name lookup, and a single alternation regex over it.
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x HF CLIP text-encoder state dict to SD format.

    Separate q/k/v projection weights/biases are captured per layer and
    concatenated into the fused `in_proj_weight`/`in_proj_bias` tensors that
    the original OpenCLIP checkpoint layout uses; all other keys are renamed
    via `textenc_pattern`/`protected`.

    Fix: the mangled original used `_A` placeholders, named the regex lambda
    parameter `__snake_case` while its body used `m`, and lost the dict-write
    targets. The fused-key suffixes (`.in_proj_weight`/`.in_proj_bias`) are
    restored from the upstream conversion script — confirm against it.
    Function name restored from the call at script bottom.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('''.self_attn.q_proj.weight''')
            or k.endswith('''.self_attn.k_proj.weight''')
            or k.endswith('''.self_attn.v_proj.weight''')
        ):
            k_pre = k[: -len('''.q_proj.weight''')]
            # single char 'q'/'k'/'v' taken from the suffix position
            k_code = k[-len('''q_proj.weight''')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith('''.self_attn.q_proj.bias''')
            or k.endswith('''.self_attn.k_proj.bias''')
            or k.endswith('''.self_attn.v_proj.bias''')
        ):
            k_pre = k[: -len('''.q_proj.bias''')]
            k_code = k[-len('''q_proj.bias''')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """v1 text encoders need no key renaming; return the dict unchanged.

    Fix: parameter was mangled to `__snake_case` while the body returned the
    undefined name `text_enc_dict`; name restored from the call at bottom.
    """
    return text_enc_dict
if __name__ == "__main__":
    # CLI: convert an HF Diffusers model directory into a single SD checkpoint.
    # Fix: the mangled original assigned parser/args/paths/state-dicts to
    # throwaway names while later lines referenced the real names.
    parser = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='''cpu''')
    else:
        unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        unet_state_dict = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='''cpu''')
    else:
        vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        vae_state_dict = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='''cpu''')
    else:
        text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 33
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
# Fix: these module constants were mangled to `lowerCAmelCase` while the
# tokenizer class below references them by their conventional names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...), and `logger` is used
# by the mask_token property.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
class a__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LED tokenizer.

    Fixes to the mangled original: the base class placeholder is restored to
    PreTrainedTokenizerFast (the bodies call ``super()._batch_encode_plus`` /
    ``self.backend_tokenizer``, which that base provides); duplicate
    `__snake_case` parameter names (a SyntaxError) are restored from body
    usage; method names are restored from the framework API they override
    (the ``@mask_token.setter`` decorator requires the property be named
    ``mask_token``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its add_prefix_space disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self):
        """Return the mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True so the mask token absorbs the preceding space, rstrip=False
        # — restored from the BART fast tokenizer; confirm against upstream.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # <s> A </s> (pair: <s> A </s> </s> B </s>)
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # LED (like BART) uses all-zero token type ids.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return encoded_inputs
| 297
| 0
|
"""Lazy import structure for the MRA model."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Fix: the dict and the torch list were assigned to `_lowerCamelCase`,
# leaving `_import_structure` (used at the bottom) undefined.
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 28
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram-stretch (equalization) of a grayscale image.

    Fixes to the mangled original: every attribute/local was assigned to the
    single name `a`; names restored from their later reads (``self.k``,
    ``self.sk``, ``self.L``, ``self.last_list``, ...). The class name is
    restored from the ``ConstantStretch()`` call in the ``__main__`` block.
    """

    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []          # equalized intensity lookup table
        self.rem = 0
        self.L = 256                 # number of gray levels
        self.sk = 0                  # running CDF
        self.k = 0                   # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        """Equalize the image at ``input_file`` and write it to output_data/output.jpg."""
        self.img = cva.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): `last % last` is always 0 for nonzero `last`;
                # preserved from the original — confirm intent upstream.
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly already stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display input and output images side by side for 5 seconds."""
        cva.imshow('Output-Image', self.img)
        cva.imshow('Input-Image', self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Fix: the mangled original assigned the path and the stretcher instance
    # to throwaway names while the following lines used `file_path`/`stretcher`.
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 297
| 0
|
from copy import deepcopy
class _lowercase :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : list[int] | None = None , __lowerCamelCase : int | None = None ):
'''simple docstring'''
if arr is None and size is not None:
lowerCamelCase__ : List[str] = size
lowerCamelCase__ : List[Any] = [0] * size
elif arr is not None:
self.init(__snake_case )
else:
raise ValueError("Either arr or size must be specified" )
def lowerCAmelCase ( self : str , __lowerCamelCase : list[int] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = len(__snake_case )
lowerCamelCase__ : Optional[Any] = deepcopy(__snake_case )
for i in range(1 , self.size ):
lowerCamelCase__ : List[Any] = self.next_(__snake_case )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowerCamelCase__ : str = self.next_(__snake_case )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return index - (index & (-index))
def lowerCAmelCase ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCamelCase__ : Union[str, Any] = self.next_(__snake_case )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
self.add(__snake_case , value - self.get(__snake_case ) )
def lowerCAmelCase ( self : Dict , __lowerCamelCase : int ):
'''simple docstring'''
if right == 0:
return 0
lowerCamelCase__ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCamelCase__ : List[str] = self.prev(__snake_case )
return result
def lowerCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
return self.prefix(__snake_case ) - self.prefix(__snake_case )
def lowerCAmelCase ( self : Any , __lowerCamelCase : int ):
'''simple docstring'''
return self.query(__snake_case , index + 1 )
def lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
lowerCamelCase__ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCamelCase__ : List[str] = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
    # Self-check: run any doctest examples defined in this module.
    import doctest

    doctest.testmod()
| 184
|
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a__:
    """Reader for a DeepSpeed config given as a dict, a JSON file path, or a
    base64-encoded JSON string; exposes dotted-path lookups plus ZeRO-stage
    and offload queries.

    NOTE(review): identifiers in this class look machine-mangled — most
    assignments target a throwaway local ``a`` instead of ``self.<attr>``,
    some signatures repeat one parameter name (a syntax error as written),
    and calls reference methods (``set_stage_and_offload``,
    ``find_config_node``, ``is_zeroa``) and attributes (``_stage``,
    ``_offload``) with no matching definitions here. Verify against the
    upstream implementation before relying on runtime behavior.
    """

    def __init__( self : List[Any] , __snake_case : Union[str, Any] ):
        # Accepts: a dict (deep-copied), a path to an existing JSON file, or a
        # urlsafe-base64 encoded JSON string.
        if isinstance(__snake_case , __snake_case ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            a : str = deepcopy(__snake_case )
        elif os.path.exists(__snake_case ):
            with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
                a : Optional[Any] = json.load(__snake_case )
        else:
            try:
                # Last resort: treat the argument as a base64-encoded JSON blob.
                a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
                a : Union[str, Any] = json.loads(__snake_case )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
        a : List[str] = config
        # Derive ZeRO stage / offload flags immediately after parsing.
        self.set_stage_and_offload()

    def lowercase_ ( self : List[str] ):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        a : Dict = self.get_value('zero_optimization.stage' , -1 )
        # offload: true when either the optimizer or the params are placed on
        # cpu or nvme.
        a : str = False
        if self.is_zeroa() or self.is_zeroa():
            a : Union[str, Any] = set(['cpu', 'nvme'] )
            a : Optional[Any] = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device' ),
                    self.get_value('zero_optimization.offload_param.device' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                a : List[str] = True

    def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
        # Resolve a dotted key ("a.b.c") to its parent node plus the leaf key.
        # NOTE(review): body reads `ds_key_long`, which is never bound here —
        # presumably the (mangled) parameter; confirm.
        a : str = self.config
        # find the config node of interest if it exists
        a : List[str] = ds_key_long.split('.' )
        a : Dict = nodes.pop()
        for node in nodes:
            a : List[Any] = config.get(__snake_case )
            if config is None:
                return None, ds_key
        return config, ds_key

    def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ):
        # Dotted-path getter with a default for missing nodes.
        # NOTE(review): the two parameters share one mangled name — a syntax
        # error as written; upstream signature is (ds_key_long, default=None).
        a , a : List[Any] = self.find_config_node(__snake_case )
        if config is None:
            return default
        return config.get(__snake_case , __snake_case )

    def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ):
        # Delete the sub-tree addressed by a dotted key, optionally raising
        # when it is absent.
        # NOTE(review): both parameters share one mangled name (syntax error);
        # body reads `ds_key_long` / `must_exist` — confirm intended signature.
        a : Optional[Any] = self.config
        # find the config node of interest if it exists
        a : List[str] = ds_key_long.split('.' )
        for node in nodes:
            a : str = config
            a : Dict = config.get(__snake_case )
            if config is None:
                if must_exist:
                    raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(__snake_case )

    def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ):
        # True only when the key exists and its value is truthy.
        a : Union[str, Any] = self.get_value(__snake_case )
        return False if value is None else bool(__snake_case )

    def lowercase_ ( self : Union[str, Any] , __snake_case : str ):
        # True only when the key exists and its value is falsy.
        a : Optional[Any] = self.get_value(__snake_case )
        return False if value is None else not bool(__snake_case )

    def lowercase_ ( self : Optional[Any] ):
        # ZeRO stage-2 check.
        return self._stage == 2

    def lowercase_ ( self : Union[str, Any] ):
        # ZeRO stage-3 check.
        return self._stage == 3

    def lowercase_ ( self : str ):
        # Whether cpu/nvme offload was detected at construction time.
        return self._offload
class a__:
    """Engine wrapper that lets the Accelerate training loop drive a DeepSpeed
    engine: one call runs backpropagation and the engine's combined step."""

    def __init__(self, engine):
        # Fix: the engine was previously assigned to a throwaway local even
        # though `lowercase_` below reads `self.engine`.
        self.engine = engine

    def lowercase_(self, loss, **kwargs):
        """Backpropagate ``loss`` through the engine, then advance it one step.

        Fix: the loss parameter and the ``**kwargs`` previously shared one
        mangled name, which is a SyntaxError.
        """
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class a__(lowerCamelCase__):
    """Optimizer wrapper used under DeepSpeed: backward/step are no-ops because
    the DeepSpeed engine performs them itself; only overflow is surfaced.

    NOTE(review): the base class name `lowerCamelCase__` appears mangled —
    presumably AcceleratedOptimizer (imported above); confirm.
    """

    def __init__(self, optimizer):
        # NOTE(review): forwarding the optimizer as `device_placement`/`scaler`
        # mirrors the mangled source; upstream passes False/None here — confirm.
        super().__init__(optimizer, device_placement=optimizer, scaler=optimizer)
        # Fix: this flag is read by the overflow property below but was
        # previously assigned to a throwaway local, raising AttributeError.
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def lowercase_(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def lowercase_(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def lowercase_(self):
        """True when the wrapped DeepSpeed optimizer reports gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class a__( lowerCamelCase__ ):
    # Scheduler wrapper used under DeepSpeed: stepping is a no-op because the
    # engine advances the LR scheduler itself during `engine.step()`.
    # NOTE(review): base class `lowerCamelCase__` appears mangled — presumably
    # AcceleratedScheduler (imported above); confirm.
    def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
        # NOTE(review): both parameters share one mangled name (a syntax error
        # as written); upstream passes (scheduler, optimizers) — confirm.
        super().__init__(__snake_case , __snake_case )

    def lowercase_ ( self : Any ):
        pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a__:
    # Placeholder optimizer: records constructor arguments so the real
    # optimizer can be created later from the DeepSpeed config.
    # NOTE(review): assignments below target a throwaway local `a` rather than
    # `self.params` / `self.lr` / `self.weight_decay` / `self.kwargs`, and the
    # four parameters share one mangled name (a syntax error as written) —
    # confirm against the upstream DummyOptim.
    def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ):
        a : Optional[Any] = params
        a : str = lr
        a : List[str] = weight_decay
        a : str = kwargs
class a__:
    # Placeholder scheduler: records constructor arguments so the real
    # scheduler can be created later from the DeepSpeed config.
    # NOTE(review): assignments target a throwaway local `a` rather than
    # `self.optimizer` / `self.total_num_steps` / `self.warmup_num_steps` /
    # `self.kwargs`, and the parameters share one mangled name (a syntax error
    # as written) — confirm against the upstream DummyScheduler.
    def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ):
        a : Union[str, Any] = optimizer
        a : Any = total_num_steps
        a : List[str] = warmup_num_steps
        a : int = kwargs
| 297
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# Guard: the conversion code below relies on fairseq >= 1.0.0a APIs.
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
    raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
# Sample sentence used to verify the converted model matches fairseq's output.
lowerCamelCase__ : Union[str, Any] = 'Hello world! cécé herlolip'
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] ) -> List[str]:
    """Copy a fairseq XLM-RoBERTa-XL checkpoint into an HF model layer by
    layer, verify both produce the same output on a sample sentence, and save
    the converted model.

    NOTE(review): identifiers here are machine-mangled — the three parameters
    share one name (a syntax error as written), every assignment targets
    `SCREAMING_SNAKE_CASE_`, and the body reads names (`_A`, `roberta`,
    `config`, `model`, `classification_head`, ...) that are never bound.
    The comments describe the evident intent; confirm against the upstream
    conversion script before running.
    """
    # Load the fairseq model and disable dropout for deterministic comparison.
    SCREAMING_SNAKE_CASE_ = FairseqRobertaModel.from_pretrained(_A )
    roberta.eval() # disable dropout
    SCREAMING_SNAKE_CASE_ = roberta.model.encoder.sentence_encoder
    # Mirror the fairseq hyper-parameters into an HF XLMRobertaConfig.
    SCREAMING_SNAKE_CASE_ = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        # Number of labels comes from the MNLI classification head.
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:' , _A )
    SCREAMING_SNAKE_CASE_ = XLMRobertaXLForSequenceClassification(_A ) if classification_head else XLMRobertaXLForMaskedLM(_A )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.embed_tokens.weight
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.embed_positions.weight
    SCREAMING_SNAKE_CASE_ = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.layer_norm.weight
    SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        SCREAMING_SNAKE_CASE_ = model.roberta.encoder.layer[i]
        SCREAMING_SNAKE_CASE_ = roberta_sent_encoder.layers[i]
        SCREAMING_SNAKE_CASE_ = layer.attention
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn_layer_norm.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn_layer_norm.bias
        # self attention
        SCREAMING_SNAKE_CASE_ = layer.attention.self
        # Sanity check: q/k/v projections must all be hidden_size x hidden_size.
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.q_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.q_proj.bias
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.k_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.k_proj.bias
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.v_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        SCREAMING_SNAKE_CASE_ = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.out_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        SCREAMING_SNAKE_CASE_ = roberta_layer.final_layer_norm.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.final_layer_norm.bias
        # intermediate
        SCREAMING_SNAKE_CASE_ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.bias
        # output
        SCREAMING_SNAKE_CASE_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.weight
        SCREAMING_SNAKE_CASE_ = roberta_layer.fca.bias
        # end of layer
    if classification_head:
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['mnli'].dense.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['mnli'].dense.bias
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['mnli'].out_proj.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.dense.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.dense.bias
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.layer_norm.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.layer_norm.bias
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.weight
        SCREAMING_SNAKE_CASE_ = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    SCREAMING_SNAKE_CASE_ = roberta.encode(_A ).unsqueeze(0 ) # batch of size 1
    SCREAMING_SNAKE_CASE_ = model(_A )[0]
    if classification_head:
        SCREAMING_SNAKE_CASE_ = roberta.model.classification_heads['mnli'](roberta.extract_features(_A ) )
    else:
        SCREAMING_SNAKE_CASE_ = roberta.model(_A )[0]
    print(our_output.shape , their_output.shape )
    # Compare element-wise; ~1e-7 expected when the copy is correct.
    SCREAMING_SNAKE_CASE_ = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
    SCREAMING_SNAKE_CASE_ = torch.allclose(_A , _A , atol=1E-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    # Persist the converted model.
    pathlib.Path(_A ).mkdir(parents=_A , exist_ok=_A )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(_A )
if __name__ == "__main__":
    # CLI entry point: convert a fairseq XLM-RoBERTa-XL checkpoint to HF format.
    # Fix: the parser and parsed args were previously bound to mangled names
    # while the code below read `parser` / `args` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--classification_head', action='store_true', help='Whether to convert a final classification head.'
    )
    args = parser.parse_args()
    # NOTE(review): the converter above is defined under a mangled name
    # (`UpperCAmelCase_`), so `convert_xlm_roberta_xl_checkpoint_to_pytorch`
    # is unresolved here — confirm the intended call target.
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 225
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Example scripts that have no one-to-one "complete_*" counterpart and are
# therefore skipped by the diff-based example tests below.
# NOTE(review): bound to `lowerCAmelCase`, but the tests read
# `EXCLUDE_EXAMPLES` — the name looks mangled; confirm.
lowerCAmelCase: list = [
    'cross_validation.py',
    'gradient_accumulation.py',
    'local_sgd.py',
    'multi_process_metrics.py',
    'memory.py',
    'automatic_gradient_accumulation.py',
    'fsdp_with_peak_mem_tracking.py',
    'deepspeed_with_config_support.py',
    'megatron_lm_gpt_pretraining.py',
]
class a__( unittest.TestCase ):
    """Checks that each `examples/by_feature/*.py` script stays in sync with
    the canonical `complete_*` example it was derived from."""

    def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
        # Diff one "complete" example against every by_feature script and
        # assert the remaining diff (minus `special_strings`) is empty.
        # NOTE(review): all four parameters share one mangled name (a syntax
        # error as written) and the body reads `parser_only`/`special_strings`
        # which are never bound — confirm the intended signature.
        a : Optional[int] = None
        a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
        a : List[str] = os.path.abspath('examples' )
        for item in os.listdir(__snake_case ):
            if item not in EXCLUDE_EXAMPLES:
                a : int = os.path.join(__snake_case , __snake_case )
                if os.path.isfile(__snake_case ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ):
                        a : List[Any] = compare_against_test(
                            os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
                        a : Union[str, Any] = '\n'.join(__snake_case )
                        if special_strings is not None:
                            # Strip lines that legitimately differ between the
                            # complete example and the focused feature script.
                            for string in special_strings:
                                a : Union[str, Any] = diff.replace(__snake_case , '' )
                        self.assertEqual(__snake_case , '' )

    def lowercase_ ( self : Optional[Any] ):
        # NLP example: check both the argument-parsing and training sections.
        self.one_complete_example('complete_nlp_example.py' , __snake_case )
        self.one_complete_example('complete_nlp_example.py' , __snake_case )

    def lowercase_ ( self : Any ):
        # CV example: tracker-logging lines differ legitimately, so they are
        # removed from the diff before asserting it is empty.
        a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        a : int = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
        self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class a__( lowerCamelCase__ ):
    """End-to-end launches of the `examples/by_feature` scripts via
    `accelerate launch`, using mocked dataloaders for speed.

    NOTE(review): base class `lowerCamelCase__` appears mangled — presumably
    TempDirTestCase (imported above); confirm.
    """
    # Class-level flag copied from the original; purpose not evident here.
    lowercase__ = False

    @classmethod
    def lowercase_ ( cls : Optional[int] ):
        # Write a default accelerate config once for the whole test class and
        # prepare the shared `accelerate launch` argument prefix.
        # NOTE(review): locals below shadow `cls._tmpdir`/`cls.configPath`
        # reads — assignment targets look mangled; confirm.
        super().setUpClass()
        a : List[str] = tempfile.mkdtemp()
        a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def lowercase_ ( cls : Optional[int] ):
        # Remove the temporary config directory created in set-up.
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def lowercase_ ( self : Tuple ):
        # Checkpointing by epoch should create an `epoch_0` folder.
        a : Union[str, Any] = F"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )

    def lowercase_ ( self : Dict ):
        # Checkpointing every step should create a `step_2` folder.
        a : Union[str, Any] = F"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        a : int = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )

    def lowercase_ ( self : Any ):
        # Resuming from an epoch checkpoint should skip epoch 0 entirely.
        a : Tuple = F"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
        """.split()
        a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case )
        self.assertNotIn('epoch 0:' , __snake_case )
        self.assertIn('epoch 1:' , __snake_case )

    def lowercase_ ( self : int ):
        # Resuming from a step checkpoint: on multi-GPU the step count maps
        # past epoch 0, on a single process epoch 0 is still (partially) run.
        a : Optional[int] = F"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
        """.split()
        a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
        if torch.cuda.is_available():
            a : Any = torch.cuda.device_count()
        else:
            a : str = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:' , __snake_case )
            self.assertIn('epoch 1:' , __snake_case )
        else:
            self.assertIn('epoch 0:' , __snake_case )
            self.assertIn('epoch 1:' , __snake_case )

    @slow
    def lowercase_ ( self : Tuple ):
        # Cross-validation example must reach at least 75% accuracy; parse the
        # last metrics dict printed to stdout.
        a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
        with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
            a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case )
            a : Optional[Any] = re.findall('({.+})' , __snake_case )
            a : str = [r for r in results if 'accuracy' in r][-1]
            a : str = ast.literal_eval(__snake_case )
            self.assertGreaterEqual(results['accuracy'] , 0.75 )

    def lowercase_ ( self : Optional[int] ):
        # Smoke test: the multi-process-metrics example runs to completion.
        a : int = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def lowercase_ ( self : Optional[int] ):
        # Tracking example should create a `tracking` folder in the project dir.
        with tempfile.TemporaryDirectory() as tmpdir:
            a : Optional[Any] = F"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) )

    def lowercase_ ( self : List[str] ):
        # Smoke test: gradient-accumulation example runs to completion.
        a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs )

    def lowercase_ ( self : int ):
        # Smoke test: local-SGD example runs to completion.
        a : Optional[Any] = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs )
| 297
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the MGP-STR model package: configuration,
# processor and tokenizer are always importable; modeling classes are exposed
# only when torch is available.
# NOTE(review): the import-structure dict is bound to `lowercase__`, but
# `_LazyModule` below receives `_import_structure` — the name looks mangled;
# confirm.
lowercase__ :str = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: leave the modeling entries out of the import structure.
    pass
else:
    lowercase__ :Optional[Any] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports instead of the lazy module.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    lowercase__ :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Strings that mark the end of a generated function body (used by the
# stopping criteria and post-processing below).
# NOTE(review): bound to `lowerCAmelCase`; downstream obfuscated code never
# references it by this name — confirm the intended constant name.
lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a__( lowerCamelCase__ ):
    """Iterable dataset that tokenizes every HumanEval prompt once and then
    yields each task `n_copies` times for batched generation.

    NOTE(review): base class `lowerCamelCase__` appears mangled — presumably
    IterableDataset (imported above). `__init__` assignments target a
    throwaway local `a` instead of `self.tokenizer`/`self.dataset`/
    `self.n_tasks`/`self.n_copies`, and the parameters share one mangled name
    (a syntax error as written) — confirm against the upstream script.
    """

    def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
        a : Union[str, Any] = tokenizer
        a : Union[str, Any] = dataset
        # Default to evaluating every task in the dataset.
        a : Any = len(__snake_case ) if n_tasks is None else n_tasks
        a : List[str] = n_copies

    def __iter__( self : str ):
        # Build all prompts first so they can be tokenized in one padded batch.
        a : List[Any] = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                # `input_len` lets the consumer slice off padding later.
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class a__(lowerCamelCase__):
    """Stopping criteria: halt generation once every sequence in the batch
    contains one of the end-of-function strings after the prompt.

    NOTE(review): base class `lowerCamelCase__` appears mangled — presumably
    StoppingCriteria (imported above); confirm.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        # Fix: the three parameters previously shared one mangled name (a
        # SyntaxError) and the values were assigned to throwaway locals even
        # though `__call__` reads them from `self`.
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when all generated continuations contain a stop string."""
        # Only decode tokens generated after the prompt (`start_length`).
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def lowerCamelCase__(_A):
    """Split the input on a capturing alternation built from its own items and
    drop the final delimiter plus the trailing remainder.

    NOTE(review): the single argument is used both as the delimiter source and
    as the string to split — this mirrors the original, mangled call shape.
    """
    pattern = '(%s)' % '|'.join(_A)
    pieces = re.split(pattern, _A)
    # last string should be ""
    return "".join(pieces[:-2])
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
    """Generate `batch_size` candidate completions per HumanEval task and
    post-process each one into plain code.

    NOTE(review): identifiers are machine-mangled — all parameters share the
    name `_A` (a syntax error as written) and assignments target a throwaway
    local `a`; body reads `accelerator`, `tokenizer`, `batch`, `n_tasks`, ...
    which are never bound. Confirm against the upstream script.
    """
    a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens
    for step, batch in tqdm(enumerate(_A ) ):
        with torch.no_grad():
            a : Optional[Any] = batch['ids'].shape[-1]
            # Generate only from the un-padded prompt slice.
            a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
            # each task is generated batch_size times
            a : Tuple = batch['task_id'].repeat(_A )
            # Pad before gathering so all processes contribute equal shapes.
            a : List[Any] = accelerator.pad_across_processes(
                _A , dim=1 , pad_index=tokenizer.pad_token_id )
            a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
            a : List[str] = generated_tokens.cpu().numpy()
            a : int = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(_A , _A ):
                gen_token_dict[task].append(_A )
    # Decode each candidate and strip everything after the function body.
    a : Any = [[] for _ in range(_A )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
            code_gens[task].append(remove_last_block(_A ) )
    return code_gens
def lowerCamelCase__ ( ):
    """Entry point: generate completions for the HumanEval benchmark with an
    Accelerate-wrapped causal LM and score them with the `code_eval` metric.

    NOTE(review): identifiers are machine-mangled — assignments target a
    throwaway local `a` while later code reads `args`, `accelerator`,
    `tokenizer`, `model`, ... which are never bound. The flow below reflects
    the evident intent; confirm against the upstream script.
    """
    # Setup configuration
    a : Dict = HfArgumentParser(_A )
    a : Any = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    a : List[Any] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    a : int = 'false'
    if args.num_workers is None:
        a : Dict = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    a : List[Any] = Accelerator()
    set_seed(args.seed , device_specific=_A )
    # Load model and tokenizer
    a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
    a : str = tokenizer.eos_token
    a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    a : Optional[Any] = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
    }
    # Load evaluation dataset and metric
    a : Optional[int] = load_dataset('openai_humaneval' )
    a : Optional[Any] = load_metric('code_eval' )
    a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    a : Optional[Any] = args.n_samples // args.batch_size
    a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    a : int = DataLoader(_A , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.' )
        raise exception
    a , a : int = accelerator.prepare(_A , _A )
    a : int = complete_code(
        _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
    if accelerator.is_main_process:
        # Pair each generated program with its unit test + entry-point check.
        a : List[str] = []
        for task in tqdm(range(_A ) ):
            a : int = human_eval['test'][task]['test']
            a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
            references.append('\n' + test_func + '\n' + entry_point )
        # Evaluate completions with "code_eval" metric
        a , a : Tuple = code_eval_metric.compute(
            references=_A , predictions=_A , num_workers=args.num_workers )
        print(f"""Results: {pass_at_k}""" )
        # Save results to json file
        with open(args.output_file , 'w' ) as fp:
            json.dump(_A , _A )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # NOTE(review): the entry point above is defined under a mangled name
    # (`lowerCamelCase__`), so `main` is unresolved here — confirm the target.
    main()
| 297
| 0
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a__(params, i, prefix):
    """Return the transposed-lookup slice of the relative-position bias
    embedding for layer ``i`` under ``prefix`` (e.g. "encoder").

    Fix: all three parameters previously shared one mangled name (a
    SyntaxError); the body already read ``params``/``prefix``/``i``.
    """
    return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def a__(params, i, prefix, layer_name="attention"):
    """Extract and flatten the K/O/Q/V projection kernels of attention layer
    ``i`` under ``prefix`` from a T5X parameter dict.

    K/Q/V are reshaped (d_model, heads, head_dim) -> (d_model, heads*head_dim);
    O is reshaped (heads, head_dim, d_model) -> (heads*head_dim, d_model).

    Fix: the four parameters previously shared one mangled name (a
    SyntaxError); the body already read ``params``/``prefix``/``i``/
    ``layer_name`` and returned ``k, o, q, v``.
    """
    k_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def a__(params, i, prefix, split_mlp_wi=False):
    """Extract the MLP input/output kernels of layer ``i`` under ``prefix``.

    v1.1-style checkpoints have a gated GeLU with two input projections
    (``wi_0``/``wi_1``), in which case a tuple is returned for ``wi``.

    Fix: the four parameters previously shared one mangled name (a
    SyntaxError); the key names in the body fix the intended bindings.
    """
    if split_mlp_wi:
        wi_0 = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def a__(params, i, prefix, layer_name):
    """Return the layer-norm scale vector of layer ``i`` for ``layer_name``
    under ``prefix`` from a T5X parameter dict.

    Fix: the four parameters previously shared one mangled name (a
    SyntaxError); the body already read all four intended names.
    """
    return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def a__ ( _SCREAMING_SNAKE_CASE , *, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
    """Convert a T5X parameter tree into a flat PyTorch-style state dict
    (encoder, optional decoder, optional scalable-attention rel-pos biases).

    NOTE(review): identifiers are machine-mangled — the parameters share one
    name (a syntax error as written), all assignments target `UpperCamelCase`
    (losing the destination state-dict keys), and the body reads `variables`,
    `old`, `num_layers`, `scalable_attention`, `is_encoder_only` which are
    never bound. The comments describe the evident intent only.
    """
    UpperCamelCase = traverse_util.flatten_dict(variables["target"] )
    UpperCamelCase = {'/'.join(_A ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old
    print("Split MLP:" , _A )
    UpperCamelCase = collections.OrderedDict()
    # Shared embeddings.
    UpperCamelCase = old['token_embedder/embedding']
    # Encoder.
    for i in range(_A ):
        # Block i, layer 0 (Self Attention).
        UpperCamelCase = tax_layer_norm_lookup(_A , _A , "encoder" , "pre_attention_layer_norm" )
        UpperCamelCase = tax_attention_lookup(_A , _A , "encoder" , "attention" )
        UpperCamelCase = layer_norm
        # T5X stores kernels transposed relative to PyTorch Linear weights.
        UpperCamelCase = k.T
        UpperCamelCase = o.T
        UpperCamelCase = q.T
        UpperCamelCase = v.T
        # Block i, layer 1 (MLP).
        UpperCamelCase = tax_layer_norm_lookup(_A , _A , "encoder" , "pre_mlp_layer_norm" )
        UpperCamelCase = tax_mlp_lookup(_A , _A , "encoder" , _A )
        UpperCamelCase = layer_norm
        if split_mlp_wi:
            UpperCamelCase = wi[0].T
            UpperCamelCase = wi[1].T
        else:
            UpperCamelCase = wi.T
        UpperCamelCase = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            UpperCamelCase = tax_relpos_bias_lookup(
                _A , _A , "encoder" ).T
    UpperCamelCase = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        # Non-scalable models share a single rel-pos bias taken from layer 0.
        UpperCamelCase = tax_relpos_bias_lookup(
            _A , 0 , "encoder" ).T
        UpperCamelCase = tax_relpos_bias_lookup(
            _A , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(_A ):
            # Block i, layer 0 (Self Attention).
            UpperCamelCase = tax_layer_norm_lookup(_A , _A , "decoder" , "pre_self_attention_layer_norm" )
            UpperCamelCase = tax_attention_lookup(_A , _A , "decoder" , "self_attention" )
            UpperCamelCase = layer_norm
            UpperCamelCase = k.T
            UpperCamelCase = o.T
            UpperCamelCase = q.T
            UpperCamelCase = v.T
            # Block i, layer 1 (Cross Attention).
            UpperCamelCase = tax_layer_norm_lookup(_A , _A , "decoder" , "pre_cross_attention_layer_norm" )
            UpperCamelCase = tax_attention_lookup(_A , _A , "decoder" , "encoder_decoder_attention" )
            UpperCamelCase = layer_norm
            UpperCamelCase = k.T
            UpperCamelCase = o.T
            UpperCamelCase = q.T
            UpperCamelCase = v.T
            # Block i, layer 2 (MLP).
            UpperCamelCase = tax_layer_norm_lookup(_A , _A , "decoder" , "pre_mlp_layer_norm" )
            UpperCamelCase = tax_mlp_lookup(_A , _A , "decoder" , _A )
            UpperCamelCase = layer_norm
            if split_mlp_wi:
                UpperCamelCase = wi[0].T
                UpperCamelCase = wi[1].T
            else:
                UpperCamelCase = wi.T
            UpperCamelCase = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                UpperCamelCase = tax_relpos_bias_lookup(_A , _A , "decoder" ).T
        UpperCamelCase = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            UpperCamelCase = old['decoder/logits_dense/kernel'].T
    return new
def a__(converted_params, is_encoder_only):
    """Build a torch state dict from converted numpy parameters, filling in
    tied/missing weights from the shared embedding.

    Fix: both parameters previously shared one mangled name (a SyntaxError)
    and the tied-weight assignments lost their destination keys; the keys are
    restored from the surrounding "not in state_dict" guards.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    """Load a T5X checkpoint, convert it, and load the weights into ``model``.

    NOTE(review): identifiers are machine-mangled — the five parameters share
    one name (a syntax error as written), assignments target `UpperCamelCase`,
    and the body calls `convert_tax_to_pytorch`/`make_state_dict` by their
    unmangled names even though those functions are defined above as `a__`.
    """
    UpperCamelCase = checkpoints.load_tax_checkpoint(_A )
    UpperCamelCase = convert_tax_to_pytorch(
        _A , num_layers=config.num_layers , is_encoder_only=_A , scalable_attention=_A )
    UpperCamelCase = make_state_dict(_A , _A )
    model.load_state_dict(_A , strict=_A )
def a__ ( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    """Convert a native T5X checkpoint into a saved PyTorch checkpoint.

    Fixes: the original declared every parameter as
    `_SCREAMING_SNAKE_CASE` (a SyntaxError) and passed the undefined
    name `_A` around; parameter names now match the CLI flags below
    (positional order and defaults preserved).

    Args:
        tax_checkpoint_path: path to the T5X checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to save the converted model.
        is_encoder_only: build an encoder-only model when True.
        scalable_attention: forward to the weight converter (umt5).
    """
    config = MTaConfig.from_json_file(config_file)
    print(F"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)
    # Load weights from the T5X checkpoint.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint back.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    # CLI entry point for the T5X -> PyTorch conversion.
    # Fixes: the parser/args objects were bound to `lowerCAmelCase__` while
    # later lines referenced the undefined names `parser`/`args`, and the
    # conversion call read `args.tax_checkpoint_path` although argparse
    # stores the `--t5x_checkpoint_path` flag as `args.t5x_checkpoint_path`.
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    parser.add_argument(
        '''--scalable_attention''',
        action='''store_true''',
        help='''Whether the model uses scaled attention (umt5 model)''',
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 153
|
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( image , w , h ):
    """Normalize image input for the pipeline into a torch tensor in [-1, 1].

    Fixes: the original declared three parameters all named `_A`
    (a SyntaxError) while the body read `image`, `w` and `h`; `np.floataa`
    was an undefined name for `np.float32`.

    Args:
        image: a torch.Tensor (returned unchanged), a PIL image, or a list
            of PIL images / tensors.
        w: target width for PIL resizing.
        h: target height for PIL resizing.

    Returns:
        A torch tensor of shape (batch, channels, h, w) scaled to [-1, 1]
        (tensors passed in are returned as-is).
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        # uint8 [0, 255] -> float32 [0, 1] -> NCHW -> [-1, 1]
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def lowerCamelCase__ ( t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation (slerp) between two vectors.

    Fixes: the original declared all parameters as `_A` (a SyntaxError),
    referenced the undefined names `DOT_THRESHOLD`/`inputs_are_torch` on
    the numpy path, and its mangled locals never formed the slerp formula.

    Args:
        t: interpolation factor in [0, 1].
        v0: start vector (numpy array or torch tensor).
        v1: end vector (same type as v0).
        DOT_THRESHOLD: if |cos(angle)| exceeds this, the vectors are nearly
            colinear and plain linear interpolation is used instead.

    Returns:
        The interpolated vector, on the original device/type of the inputs.
    """
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        # Torch tensors: remember the device and compute in numpy.
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly parallel: slerp is ill-conditioned, fall back to lerp.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def lowerCamelCase__ ( x , y ):
    """Squared spherical (great-circle) distance between unit-normalized rows.

    Fixes: the original declared both parameters as `_A` (a SyntaxError);
    the body operated on `x`/`y`, which are now the parameter names.

    Args:
        x: tensor whose last dimension is normalized before comparison.
        y: tensor of the same shape as `x`.

    Returns:
        Per-row loss tensor: 2 * arcsin(||x - y|| / 2)^2 on the unit sphere.
    """
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def lowerCamelCase__ ( model , value ):
    """Enable/disable gradient tracking for every parameter of `model`.

    Fixes: the original declared both parameters as `_A` (a SyntaxError)
    and its loop body bound a throwaway local (`a : int = value`) instead
    of setting `param.requires_grad`, so it had no effect.

    Args:
        model: any torch.nn.Module.
        value: bool to assign to each parameter's `requires_grad`.
    """
    for param in model.parameters():
        param.requires_grad = value
class a__( lowerCamelCase__ ):
    """CLIP-guided image-mixing Stable Diffusion pipeline.

    Blends a "content" image with a "style" image: latents and text
    embeddings of the two inputs are spherically interpolated (`slerp`),
    and denoising can additionally be steered by a CLIP guidance loss
    (`cond_fn`). Prompts may be auto-generated with an optional CoCa
    captioning model.

    NOTE(review): every method below declares all of its parameters under
    the single name `__snake_case` while the bodies read descriptive names
    (`latents`, `timestep`, ...). The parameter names appear to have been
    mangled by an automated rewrite; the code is preserved byte-for-byte
    here — restore the original names before running.
    """
    def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ):
        """Register all submodules and freeze the text encoder and CLIP model."""
        super().__init__()
        self.register_modules(
            vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , )
        # feature_extractor.size may be a plain int or a dict with
        # 'shortest_edge' depending on the feature extractor version.
        a : Optional[Any] = (
            feature_extractor.size
            if isinstance(feature_extractor.size , __snake_case )
            else feature_extractor.size['shortest_edge']
        )
        a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , __snake_case )
        set_requires_grad(self.clip_model , __snake_case )
    def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ):
        """Enable sliced attention computation to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a : Union[str, Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__snake_case )
    def lowercase_ ( self : Union[str, Any] ):
        """Disable attention slicing (delegates with a None-like argument)."""
        self.enable_attention_slicing(__snake_case )
    def lowercase_ ( self : Optional[Any] ):
        """Freeze the VAE (requires_grad = False, presumably)."""
        set_requires_grad(self.vae , __snake_case )
    def lowercase_ ( self : Tuple ):
        """Unfreeze the VAE."""
        set_requires_grad(self.vae , __snake_case )
    def lowercase_ ( self : int ):
        """Freeze the UNet."""
        set_requires_grad(self.unet , __snake_case )
    def lowercase_ ( self : Union[str, Any] ):
        """Unfreeze the UNet."""
        set_requires_grad(self.unet , __snake_case )
    def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ):
        """Compute the truncated timestep schedule for img2img strength."""
        # get the original timestep using init_timestep
        a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case )
        a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
        a : List[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ):
        """Encode an image tensor to VAE latents and add scheduler noise."""
        if not isinstance(__snake_case , torch.Tensor ):
            raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" )
        a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case )
        if isinstance(__snake_case , __snake_case ):
            # Per-sample generators: encode one image at a time.
            a : Optional[int] = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
            ]
            a : Optional[Any] = torch.cat(__snake_case , dim=0 )
        else:
            a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        a : List[str] = 0.18215 * init_latents
        a : str = init_latents.repeat_interleave(__snake_case , dim=0 )
        a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case )
        # get latents
        a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
        a : int = init_latents
        return latents
    def lowercase_ ( self : List[str] , __snake_case : Dict ):
        """Caption an image with the CoCa model and strip special tokens."""
        a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
    def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ):
        """Compute L2-normalized CLIP image embeddings, repeated per prompt."""
        a : List[Any] = self.feature_extractor.preprocess(__snake_case )
        a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
        a : int = self.clip_model.get_image_features(__snake_case )
        a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
        a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 )
        return image_embeddings_clip
    @torch.enable_grad()
    def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ):
        """CLIP-guidance step: adjust noise prediction by the gradient of the
        spherical distance between the decoded image's CLIP embedding and the
        target embedding."""
        a : Optional[Any] = latents.detach().requires_grad_()
        a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case )
        # predict the noise residual
        a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            a : int = self.scheduler.alphas_cumprod[timestep]
            a : Any = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            a : Tuple = torch.sqrt(__snake_case )
            a : str = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , __snake_case ):
            a : List[Any] = self.scheduler.sigmas[index]
            a : Optional[int] = latents - sigma * noise_pred
        else:
            raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        a : Union[str, Any] = 1 / 0.18215 * sample
        a : str = self.vae.decode(__snake_case ).sample
        a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case )
        a : List[str] = self.normalize(__snake_case ).to(latents.dtype )
        a : List[str] = self.clip_model.get_image_features(__snake_case )
        a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
        a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale
        a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0]
        if isinstance(self.scheduler , __snake_case ):
            a : List[Any] = latents.detach() + grads * (sigma**2)
            a : Optional[int] = noise_pred_original
        else:
            a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
        """Run the content/style mixing diffusion loop and return the image(s)."""
        if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
            raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
            a : Dict = [generator] + [None] * (batch_size - 1)
        a : Any = [
            ('model', self.coca_model is None),
            ('tokenizer', self.coca_tokenizer is None),
            ('transform', self.coca_transform is None),
        ]
        a : List[str] = [x[0] for x in coca_is_none if x[1]]
        a : List[str] = ', '.join(__snake_case )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(__snake_case ):
                raise ValueError(
                    F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
            a : int = self.get_image_description(__snake_case )
        if style_prompt is None:
            if len(__snake_case ):
                raise ValueError(
                    F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
            a : Union[str, Any] = self.get_image_description(__snake_case )
        # get prompt text embeddings for content and style
        a : Optional[Any] = self.tokenizer(
            __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
        a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        a : Dict = self.tokenizer(
            __snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
        a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        # slerp between the content and style text embeddings
        a : Any = slerp(__snake_case , __snake_case , __snake_case )
        # duplicate text embeddings for each generation per prompt
        a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 )
        # set timesteps
        a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        a : Any = {}
        if accepts_offset:
            a : Optional[Any] = 1
        self.scheduler.set_timesteps(__snake_case , **__snake_case )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        # NOTE(review): the return value of .to() is discarded below, so this
        # line has no effect as written — verify against upstream.
        self.scheduler.timesteps.to(self.device )
        a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device )
        a : Optional[int] = timesteps[:1].repeat(__snake_case )
        # Preprocess image
        a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
        a : List[Any] = self.prepare_latents(
            __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
        a : str = preprocess(__snake_case , __snake_case , __snake_case )
        a : Union[str, Any] = self.prepare_latents(
            __snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
        # slerp between the content and style latents
        a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case )
        if clip_guidance_scale > 0:
            a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case )
            a : int = self.get_clip_image_embeddings(__snake_case , __snake_case )
            a : List[str] = slerp(
                __snake_case , __snake_case , __snake_case )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        a : int = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            a : Any = content_text_input.input_ids.shape[-1]
            a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' )
            a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            a : Any = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        a : List[str] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
                    self.device )
            else:
                a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            a : List[str] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        a : Any = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        a : Union[str, Any] = {}
        if accepts_eta:
            a : List[str] = eta
        # check if the scheduler accepts generator
        a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            a : Any = generator
        with self.progress_bar(total=__snake_case ):
            for i, t in enumerate(__snake_case ):
                # expand the latents if we are doing classifier free guidance
                a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case )
                # predict the noise residual
                a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    a , a : List[str] = noise_pred.chunk(2 )
                    a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    a : Optional[Any] = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    a , a : Union[str, Any] = self.cond_fn(
                        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
                # compute the previous noisy sample x_t -> x_t-1
                a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        a : Tuple = 1 / 0.18215 * latents
        a : Optional[int] = self.vae.decode(__snake_case ).sample
        a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
        a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            a : str = self.numpy_to_pil(__snake_case )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
| 297
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class A :
    """Configuration for bitsandbytes quantized model loading.

    NOTE(review): identifier mangling collapsed the original 8-bit/4-bit
    attribute names into the single name `load_in_abit` (see the duplicate
    assignment in __init__ and the `or` of the same attribute in
    `is_quantizable`), and `__init__` declares every parameter under the
    same name `_UpperCAmelCase`, which is a SyntaxError. Code preserved
    byte-for-byte — restore the distinct names before use.
    """
    def __init__(self , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=6.0 , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase="fp4" , _UpperCAmelCase=False , **_UpperCAmelCase , ) -> int:
        # NOTE(review): both lines below assign `load_in_abit`; presumably one
        # was load_in_8bit and the other load_in_4bit — verify upstream.
        __UpperCamelCase : Tuple = load_in_abit
        __UpperCamelCase : Tuple = load_in_abit
        __UpperCamelCase : Optional[Any] = llm_inta_threshold
        __UpperCamelCase : Any = llm_inta_skip_modules
        __UpperCamelCase : List[str] = llm_inta_enable_fpaa_cpu_offload
        __UpperCamelCase : Union[str, Any] = llm_inta_has_fpaa_weight
        __UpperCamelCase : int = bnb_abit_quant_type
        __UpperCamelCase : str = bnb_abit_use_double_quant
        # Resolve the compute dtype: default float, a torch attribute name, or
        # a torch.dtype instance.
        if bnb_abit_compute_dtype is None:
            __UpperCamelCase : Dict = torch.floataa
        elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            __UpperCamelCase : Tuple = getattr(_UpperCAmelCase , _UpperCAmelCase )
        elif isinstance(_UpperCAmelCase , torch.dtype ):
            __UpperCamelCase : Union[str, Any] = bnb_abit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" )
        self.post_init()
    def a_ (self ) -> Union[str, Any]:
        """Validate field types and the installed bitsandbytes version."""
        if not isinstance(self.llm_inta_threshold , _UpperCAmelCase ):
            raise ValueError("llm_int8_threshold must be a float" )
        if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _UpperCAmelCase ):
            raise ValueError("llm_int8_skip_modules must be a list of strings" )
        if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _UpperCAmelCase ):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" )
        if not isinstance(self.llm_inta_has_fpaa_weight , _UpperCAmelCase ):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean" )
        if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" )
        if not isinstance(self.bnb_abit_quant_type , _UpperCAmelCase ):
            raise ValueError("bnb_4bit_quant_type must be a string" )
        if not isinstance(self.bnb_abit_use_double_quant , _UpperCAmelCase ):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean" )
        # 4-bit loading requires a minimum bitsandbytes release.
        if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse(
            "0.39.0" ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" )
    def a_ (self ) -> Dict:
        """Return True when any quantized-loading mode is enabled."""
        return self.load_in_abit or self.load_in_abit
    def a_ (self ) -> List[str]:
        """Return the active quantization method name, or None."""
        if self.load_in_abit:
            return "llm_int8"
        elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def a_ (cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
        """Build a config from a dict, consuming matching kwargs."""
        __UpperCamelCase : int = cls(**_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = []
        for key, value in kwargs.items():
            if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
                setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
                to_remove.append(_UpperCAmelCase )
        for key in to_remove:
            kwargs.pop(_UpperCAmelCase , _UpperCAmelCase )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def a_ (self , _UpperCAmelCase ) -> Tuple:
        """Serialize the config to a JSON file at the given path."""
        with open(_UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
            __UpperCamelCase : Optional[int] = self.to_dict()
            __UpperCamelCase : List[str] = json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + "\n"
            writer.write(_UpperCAmelCase )
    def a_ (self ) -> Dict[str, Any]:
        """Return a dict copy with the compute dtype rendered as a string."""
        __UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
        # torch.float16 -> "float16" for JSON-compatibility.
        __UpperCamelCase : Union[str, Any] = str(output["bnb_4bit_compute_dtype"] ).split("." )[1]
        return output
    def __repr__(self ) -> List[str]:
        return f"{self.__class__.__name__} {self.to_json_string()}"
    def a_ (self , _UpperCAmelCase = True ) -> str:
        """Serialize to a JSON string, optionally only the non-default diff."""
        if use_diff is True:
            __UpperCamelCase : Tuple = self.to_diff_dict()
        else:
            __UpperCamelCase : Optional[Any] = self.to_dict()
        return json.dumps(_UpperCAmelCase , indent=2 , sort_keys=_UpperCAmelCase ) + "\n"
    def a_ (self ) -> Dict[str, Any]:
        """Return only the fields that differ from a default-constructed config."""
        __UpperCamelCase : str = self.to_dict()
        # get the default config dict
        __UpperCamelCase : Optional[Any] = BitsAndBytesConfig().to_dict()
        __UpperCamelCase : Union[str, Any] = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                __UpperCamelCase : List[Any] = value
        return serializable_config_dict
| 298
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCAmelCase ( subparsers=None ):
    """Build the argument parser for the `accelerate test` command.

    Fixes: the parser object was bound to `__UpperCamelCase` while every
    following line read the undefined name `parser`, and the body read
    `subparsers` although the parameter was named `snake_case__`.

    Args:
        subparsers: optional argparse subparsers object; when given, the
            "test" subcommand is registered on it, otherwise a standalone
            parser is created.

    Returns:
        The configured argparse.ArgumentParser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )
    if subparsers is not None:
        # NOTE(review): upstream accelerate sets `func=test_command` here; in
        # this file the source bound `func` to the subparsers argument itself.
        # Behavior preserved as written — confirm against upstream.
        parser.set_defaults(func=subparsers)
    return parser
def __lowerCAmelCase ( args ):
    """Run the accelerate environment self-test script via `accelerate-launch`.

    Fixes: the locals were bound to `__UpperCamelCase` while the body read the
    undefined names `script_name`/`test_args`/`args`; names now match usage.

    Args:
        args: parsed argparse namespace with an optional `config_file`.
    """
    # Resolve .../accelerate/test_utils/scripts/test_script.py relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def __lowerCAmelCase ( ):
    # Entry point for the `accelerate test` CLI.
    # NOTE(review): `test_command_parser`, `parser` and `test_command` are not
    # defined under these names in this file — the sibling functions above are
    # also named `__lowerCAmelCase`, so these references appear to be remnants
    # of an automated rename. Verify against the upstream accelerate source.
    __UpperCamelCase : int = test_command_parser()
    __UpperCamelCase : Union[str, Any] = parser.parse_args()
    test_command(snake_case__ )
if __name__ == "__main__":
    # NOTE(review): `main` is likewise undefined here (the function above was
    # presumably named `main` before renaming) — confirm before running.
    main()
| 298
| 1
|
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Delimiter inserted between sentences when flattening to characters.
# NOTE(review): later code references `SENTENCE_DELIMITER`; this constant was
# presumably named that before an automated rename — verify.
_lowerCAmelCase = ''''''
# jiwer < 2.3.0 lacks ReduceToListOfListOfChars, so a custom transform is used.
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class A ( tr.AbstractTransform ):
        """Transform splitting sentences into characters, joined by a delimiter."""
        def __init__(self , _UpperCAmelCase = " " ) -> Union[str, Any]:
            # NOTE(review): `sentence_delimiter` is not the parameter name
            # (`_UpperCAmelCase`) — another rename remnant; verify upstream.
            __UpperCamelCase : str = sentence_delimiter
        def a_ (self , _UpperCAmelCase ) -> List[Any]:
            # Split a single string into its characters.
            return list(_UpperCAmelCase )
        def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
            # Flatten a list of sentences into one character list, inserting
            # the delimiter between consecutive sentences.
            __UpperCamelCase : Optional[int] = []
            for sent_idx, sentence in enumerate(_UpperCAmelCase ):
                chars.extend(self.process_string(_UpperCAmelCase ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_UpperCAmelCase ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars
    _lowerCAmelCase = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    # Modern jiwer provides the character reduction natively.
    _lowerCAmelCase = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
# BibTeX citation for the CER metric.
_lowerCAmelCase = '''\
@inproceedings{inproceedings,
  author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
  year = {2004},
  month = {01},
  pages = {},
  title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
# Long description of the metric.
_lowerCAmelCase = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
# Usage documentation shown by the metric loader.
_lowerCAmelCase = '''
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate
Examples:
    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    """Character Error Rate (CER) metric built on top of jiwer.

    NOTE(review): the second method declares three parameters all named
    `_UpperCAmelCase` (a SyntaxError) while its body reads `concatenate_texts`,
    `prediction` and `reference` — parameter names appear mangled; code is
    preserved byte-for-byte here.
    """
    def a_ (self ) -> int:
        # Metric metadata: string predictions/references, CER citation/docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ] , )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Dict:
        # The character transforms above reduce text to characters, so jiwer's
        # "wer" over those units is the character error rate.
        if concatenate_texts:
            return jiwer.compute_measures(
                _UpperCAmelCase , _UpperCAmelCase , truth_transform=_UpperCAmelCase , hypothesis_transform=_UpperCAmelCase , )["wer"]
        __UpperCamelCase : List[str] = 0
        __UpperCamelCase : Dict = 0
        # Accumulate edit counts per pair and normalize once at the end.
        for prediction, reference in zip(_UpperCAmelCase , _UpperCAmelCase ):
            __UpperCamelCase : str = jiwer.compute_measures(
                _UpperCAmelCase , _UpperCAmelCase , truth_transform=_UpperCAmelCase , hypothesis_transform=_UpperCAmelCase , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 298
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Unit tests for BlenderbotSmallTokenizer.

    NOTE(review): the two class attributes below are both named `A`
    (the second shadows the first); upstream they are presumably
    `tokenizer_class` and `test_rust_tokenizer` — verify.
    """
    A = BlenderbotSmallTokenizer
    A = False
    def a_ (self ) -> List[str]:
        # Write a tiny vocab + BPE merges file into the temp dir used by the
        # common tokenizer test harness.
        super().setUp()
        __UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        __UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        __UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        __UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        __UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_UpperCAmelCase ) )
    def a_ (self , **_UpperCAmelCase ) -> Dict:
        # Load a tokenizer from the temp dir, merging in the special tokens.
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase ) -> str:
        # Input/output pair consumed by the common round-trip tests.
        __UpperCamelCase : List[Any] = "adapt act apte"
        __UpperCamelCase : Dict = "adapt act apte"
        return input_text, output_text
    def a_ (self ) -> int:
        # Tokenization + id conversion against the tiny fixture vocab.
        __UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase : str = "adapt act apte"
        __UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
        __UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        __UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
    def a_ (self ) -> int:
        # Integration test against the hosted 90M checkpoint (network access).
        __UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1_3_8_4]
        __UpperCamelCase : Dict = "I am a small frog."
        __UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def a_ (self ) -> List[Any]:
        # A trailing period should tokenize to the same id standalone or in text.
        __UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        __UpperCamelCase : Tuple = "I am a small frog ."
        __UpperCamelCase : List[str] = "."
        __UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 298
| 1
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names expected inside a PhoBERT checkpoint directory.
# (The tokenizer class below reads these names — the original bound every
# constant to the same `_lowerCAmelCase`, leaving the real names undefined.)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

# Download locations of the vocab/merges files for the published checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

# Maximum input lengths (in tokens) for the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Fixes: the original overwrote the accumulated pairs with ``set(word)``
    (a set of single characters) just before returning, raised IndexError on
    an empty word, and was named inconsistently with its call sites (`get_pairs`).
    """
    pairs = set()
    if not word:  # nothing to pair in an empty word
        return pairs
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """PhoBERT tokenizer: fairseq-style BPE over a ``<token> <count>`` dictionary file.

    Fixes over the obfuscated original: the undefined base placeholder is restored to
    `PreTrainedTokenizer`, `__init__` no longer repeats one parameter name (a
    SyntaxError), attribute assignments are made on `self` instead of throwaway
    locals, and the methods carry the names the tokenizer machinery calls.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        # fairseq reserves the first four ids for the special tokens (ids 0-3 below).
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]  # drop the trailing count column
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """PhoBERT does not use token types, so the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the merge codes to one whitespace-delimited token; return '@@ '-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])  # mark the end-of-word symbol
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]  # strip the trailing "</w>"
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split on whitespace, then BPE-encode each chunk."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo the BPE '@@ ' continuation markers to recover plain text."""
        return " ".join(tokens).replace("@@ ", "").strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        # Only copy when the source and destination differ.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Load a fairseq dictionary (one "<token> <count>" per line) into self.encoder."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 298
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
# (The `add_code_sample_docstrings` decorators below read these names — the
# original bound every constant to the same `_lowerCAmelCase`.)
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv2D -> BatchNorm -> activation with "same" padding emulated via explicit zero padding.

    Fixes: duplicate `_UpperCAmelCase` parameters (SyntaxError), locals instead of
    attributes, mangled keras names (`ZeroPaddingaD`/`ConvaD`), and the keras entry
    point restored from `a_` to `call`.
    """

    def __init__(self, out_channels, kernel_size=3, stride=1, groups=1, activation="relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # No activation means identity (used for the last conv of a residual block).
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: a single strided conv that embeds the pixel values."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch norm used to project the residual when shape or stride changes."""

    def __init__(self, out_channels, stride=2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation: global pool, bottleneck convs, then channel-wise rescale."""

    def __init__(self, in_channels, reduced_channels, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X residual block: 1x1 -> grouped 3x3 -> 1x1, with an optional projection shortcut."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # Last conv has no activation: it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y residual block: an X block with a Squeeze-and-Excitation layer inserted."""

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # Last conv has no activation: it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """One RegNet stage: `depth` stacked X or Y blocks; the first block downsamples."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects the hidden state after each stage."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # Record the *input* of each stage...
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            # ...and the final output once.
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + global pooling; returns NCHW tensors for API uniformity."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self, pixel_values, output_hidden_states=None, return_dict=None, training=False
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Fixed 224x224 dummy signature; dtype restored from mangled `tf.floataa`.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# Shared docstrings for the model classes below.  The original bound both
# strings to the same `_lowerCAmelCase` name, so the second assignment
# clobbered the first and the decorators could not reference either.
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Thin `TFPreTrainedModel` wrapper around `TFRegNetMainLayer`."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self, pixel_values, output_hidden_states=None, return_dict=None, training=False
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone plus a flatten + dense classification head."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head; identity when the config declares no labels
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 298
| 1
|
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps a pip-style comparison operator to its callable.  The functions below
# read this mapping as `ops`; the original bound it to `_lowerCAmelCase`.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare an installed version against a wanted one with the given operator.

    Raises ValueError when either version is missing and ImportError when the
    comparison fails.  Fixes: the original repeated one parameter name six
    times, which is a SyntaxError.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement, hint=None):
    """Check that the pip-style `requirement` (e.g. "tokenizers==0.9.4") is satisfied.

    `requirement` may be a bare package name (existence check only), or a name with
    one or more comma-separated version constraints.  "python" is special-cased
    against the running interpreter.  Raises via `_compare_versions` on mismatch.
    """
    hint = f"\n{hint}" if hint is not None else ""
    wanted = {}  # robustness fix: defined even on the non-versioned path
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}")
    # special case: compare against the running interpreter, not an installed dist
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """`require_version` wrapper which emits a core-install hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 298
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of each row of the pre-softmax logit tensor `x`, shape (batch, classes).

    Uses the identity H = log(sum exp(x)) - sum(x * exp(x)) / sum(exp(x)).
    Fixes: the original body referenced undefined names (`x`, `exp_x`, `A`, `B`
    were never bound) and was named inconsistently with its call site (`entropy`).
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with a "highway" (early-exit) classifier after every layer (DeeBERT).

    Fixes over the obfuscated original: `__init__` assigned modules to throwaway
    locals instead of `self.*`, `forward` repeated one parameter name (a
    SyntaxError), and methods are restored to the names the model calls
    (`init_highway_pooler`, `set_early_exit_entropy`, `forward`).
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # Per-layer entropy threshold; -1 disables early exit at that layer.
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # A scalar sets the same threshold everywhere; a sequence sets per-layer values.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        # Copy the main model's pooler weights into every highway pooler.
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            # NOTE(review): head_mask is indexed per layer, so a None default
            # requires callers to always pass a mask — matches upstream DeeBERT.
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                # Abort the forward pass early when the exit is confident enough.
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''

    def __init__(self , _UpperCAmelCase ) -> Dict:
        # NOTE(review): the assignments below bind to throwaway locals instead
        # of self.config / self.embeddings / self.encoder / self.pooler, so the
        # submodules are never attached to the module; presumably mangled from
        # the original DeeBertModel.__init__ -- confirm.
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = config
        __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
        __UpperCamelCase : str = BertPooler(_UpperCAmelCase )
        self.init_weights()

    def a_ (self ) -> Any:
        # share the final pooler with every highway (early-exit) head
        self.encoder.init_highway_pooler(self.pooler )

    def a_ (self ) -> Optional[int]:
        # getter for the input embedding lookup table
        return self.embeddings.word_embeddings

    def a_ (self , _UpperCAmelCase ) -> Dict:
        # setter for the input embeddings (NOTE(review): reads an undefined
        # name `value` and binds a throwaway local -- mangled setter)
        __UpperCamelCase : int = value

    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # prune attention heads; expects a {layer_index: [head indices]} mapping
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
        """Model forward pass: embeddings -> early-exit encoder -> pooler.

        NOTE(review): duplicated mangled parameter names (SyntaxError);
        presumably input_ids, attention_mask, token_type_ids, position_ids,
        head_mask, inputs_embeds, encoder_hidden_states,
        encoder_attention_mask -- confirm.
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            __UpperCamelCase : Tuple = input_ids.size()
        elif inputs_embeds is not None:
            __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
        # default masks / token types when the caller omits them
        if attention_mask is None:
            __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if encoder_attention_mask is None:
            __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if token_type_ids is None:
            __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
        __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        # large negative bias on masked positions so softmax ignores them
        __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
        __UpperCamelCase : Optional[int] = self.embeddings(
            input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = self.encoder(
            _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
        __UpperCamelCase : Union[str, Any] = encoder_outputs[0]
        __UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
__UpperCamelCase : Tuple = message
__UpperCamelCase : Union[str, Any] = exit_layer # start from 1!
class A ( nn.Module ):
    """A single highway (early-exit) head: pooler + dropout + classifier.

    Mirrors BERT's classification head so an intermediate layer's hidden
    states can be turned into classification logits.
    """

    def __init__(self , config ) -> Dict:
        # Fixes: the original bound the submodules to throwaway locals instead
        # of attributes (so nn.Module never registered them) and named the
        # parameter `_UpperCAmelCase` while the body read `config`.
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )

    def a_ (self , _UpperCAmelCase ) -> Any:
        """Return (logits, pooled_output) for an encoder outputs tuple."""
        # Pooler
        pooler_input = _UpperCAmelCase[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + _UpperCAmelCase[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''

    def __init__(self , _UpperCAmelCase ) -> Any:
        # NOTE(review): the throwaway locals below were presumably
        # self.num_labels / self.num_layers / self.bert / self.dropout /
        # self.classifier in the original DeeBERT classifier -- confirm.
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = config.num_labels
        __UpperCamelCase : List[Any] = config.num_hidden_layers
        __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
        __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
        """Sequence-classification forward pass with highway early exits.

        NOTE(review): duplicated mangled parameter names (SyntaxError);
        presumably input_ids, attention_mask, token_type_ids, position_ids,
        head_mask, inputs_embeds, labels, output_layer=-1,
        train_highway=False -- confirm.
        """
        # assume the full model runs unless a HighwayException fires
        __UpperCamelCase : int = self.num_layers
        try:
            __UpperCamelCase : Tuple = self.bert(
                _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            __UpperCamelCase : str = outputs[1]
            __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
            __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
            __UpperCamelCase : Tuple = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an intermediate layer was confident enough to exit early
            __UpperCamelCase : int = e.message
            __UpperCamelCase : Optional[Any] = e.exit_layer
            __UpperCamelCase : Optional[int] = outputs[0]
        if not self.training:
            # track entropy/logits per exit for analysis at inference time
            __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
            __UpperCamelCase : Optional[Any] = []
            __UpperCamelCase : Any = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                __UpperCamelCase : List[str] = MSELoss()
                __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                __UpperCamelCase : Dict = CrossEntropyLoss()
                __UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            __UpperCamelCase : List[Any] = []
            for highway_exit in outputs[-1]:
                __UpperCamelCase : Union[str, Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(_UpperCAmelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    __UpperCamelCase : Union[str, Any] = MSELoss()
                    __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    __UpperCamelCase : Optional[Any] = CrossEntropyLoss()
                    __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(_UpperCAmelCase )
            if train_highway:
                # train only the highway heads
                __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                __UpperCamelCase : Dict = (loss,) + outputs
        if not self.training:
            __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                __UpperCamelCase : int = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 298
| 1
|
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack.

    Returns the best value achievable using the first ``i`` items (weights
    ``wt``, values ``val``) within capacity ``j``.  Relies on a module-level
    table ``f`` pre-filled with 0 for the base rows/columns and -1 for
    "not yet computed" entries; ``f[i][j]`` caches the result.

    Fixes: restored the name used by the call sites (``mf_knapsack``) and the
    write into the memo table, which the mangled version assigned to a
    throwaway local so nothing was ever cached.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # item i does not fit: skip it
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            # max of skipping item i vs. taking it
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Args:
        w: knapsack capacity.
        wt: item weights (length >= n).
        val: item values (length >= n).
        n: number of items to consider.

    Returns:
        (optimal value, full DP table) where ``dp[i][c]`` is the best value
        using the first ``i`` items with capacity ``c``.

    Fixes: restored the name used by the call sites (``knapsack``) and the
    writes into ``dp``, which the mangled version bound to throwaway locals
    so the table stayed all zeros.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                # take item i or leave it, whichever is better
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                # item i does not fit at capacity w_
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    """Solve 0/1 knapsack and reconstruct one optimal item subset.

    Args:
        w: knapsack capacity.
        wt: item weights (list or tuple of ints).
        val: item values (list or tuple, same length as ``wt``).

    Returns:
        (optimal value, set of 1-based indices of one optimal item subset).

    Raises:
        ValueError: if the vectors are not lists/tuples or differ in length.
        TypeError: if any weight is not an integer.

    Fixes: restored the name used by the call sites
    (``knapsack_with_example_solution``) and the local bindings that the
    mangled version threw away, so the validation messages and the solution
    reconstruction actually use the computed values.
    """
    if not (isinstance(w, (list, tuple)) if False else (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple)))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples" )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
# for the current item i at a maximum weight j to be part of an optimal subset,
# the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
# where i - 1 means considering only the previous items at the given maximum weight
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(snake_case__ , snake_case__ , i - 1 , snake_case__ , snake_case__ )
else:
optimal_set.add(snake_case__ )
_construct_solution(snake_case__ , snake_case__ , i - 1 , j - wt[i - 1] , snake_case__ )
if __name__ == "__main__":
    # Demo / self-test of the three knapsack implementations.
    # Fixes: the mangled version bound every result to the same clobbered
    # name (`_lowerCAmelCase`) while the later lines read `optimal_solution`,
    # `optimal_subset`, `w`, `wt`, `val`, `n` and `f`.
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # memo table for mf_knapsack: row 0 and column 0 are 0, the rest unknown (-1)
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('''optimal_value = ''', optimal_solution)
    print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 298
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# NOTE(review): every constant below shares the mangled name `_lowerCAmelCase`,
# so each assignment clobbers the previous one; presumably these were the
# named constants of the diffusers `utils.constants` module (default_cache_path,
# CONFIG_NAME, WEIGHTS_NAME, ...) -- confirm against upstream.
# Default on-disk cache root, taken from huggingface_hub.
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE
# Canonical file names inside a diffusion-model checkpoint directory.
_lowerCAmelCase = '''config.json'''
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''
_lowerCAmelCase = '''model.onnx'''
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''
_lowerCAmelCase = '''weights.pb'''
# Endpoint used to resolve remote model files.
_lowerCAmelCase = '''https://huggingface.co'''
# NOTE(review): `default_cache_path` is read here but never defined in this
# fragment -- confirm where it comes from.
_lowerCAmelCase = default_cache_path
_lowerCAmelCase = '''diffusers_modules'''
# Cache directory for dynamically loaded modules (env-var overridable).
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
# Revision names that are deprecated as `revision=` arguments.
_lowerCAmelCase = ['''fp16''', '''non-ema''']
_lowerCAmelCase = '''.self_attn'''
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
    '''simple docstring'''

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
        # Test helper that builds a small ConvBERT config plus random inputs.
        # NOTE(review): keyword arguments are ignored (everything is hard-coded
        # below), the signature duplicates the mangled parameter name
        # (SyntaxError), and the throwaway locals were presumably self.*
        # attributes (self.parent, self.batch_size, self.seq_length, ...) of
        # the original TFConvBertModelTester -- confirm.
        __UpperCamelCase : Optional[Any] = parent
        __UpperCamelCase : List[str] = 1_3
        __UpperCamelCase : List[Any] = 7
        __UpperCamelCase : List[str] = True
        __UpperCamelCase : Optional[Any] = True
        __UpperCamelCase : Tuple = True
        __UpperCamelCase : str = True
        __UpperCamelCase : List[Any] = 9_9
        __UpperCamelCase : Union[str, Any] = 3_8_4
        __UpperCamelCase : str = 2
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : Any = 3_7
        __UpperCamelCase : str = "gelu"
        __UpperCamelCase : Optional[Any] = 0.1
        __UpperCamelCase : str = 0.1
        __UpperCamelCase : str = 5_1_2
        __UpperCamelCase : Optional[Any] = 1_6
        __UpperCamelCase : Dict = 2
        __UpperCamelCase : Optional[int] = 0.02
        __UpperCamelCase : List[Any] = 3
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : int = 1_2_8
        __UpperCamelCase : Tuple = 2
        __UpperCamelCase : str = 9
        __UpperCamelCase : List[Any] = 1
        __UpperCamelCase : Any = None

    def a_ (self ) -> int:
        # Build random ids/masks/labels and the ConvBertConfig used by the
        # create_and_check_* helpers below.
        __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase : str = None
        if self.use_input_mask:
            __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCamelCase : int = None
        if self.use_token_type_ids:
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCamelCase : List[Any] = None
        __UpperCamelCase : Union[str, Any] = None
        __UpperCamelCase : Optional[Any] = None
        if self.use_labels:
            __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCamelCase : str = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        # Base-model smoke test: dict input and list input give the right shape.
        __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        __UpperCamelCase : Optional[Any] = [input_ids, input_mask]
        __UpperCamelCase : str = model(_UpperCAmelCase )
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        # Masked-LM head: logits over the vocabulary at every position.
        __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : List[str] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        # Sequence-classification head: one logit vector per example.
        __UpperCamelCase : Union[str, Any] = self.num_labels
        __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
        __UpperCamelCase : List[str] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        # Multiple-choice head: inputs are tiled per choice.
        __UpperCamelCase : Optional[int] = self.num_choices
        __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
        # Token-classification head: one logit vector per token.
        __UpperCamelCase : List[str] = self.num_labels
        __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        # Question-answering head: start/end logits per token.
        __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Any = model(_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def a_ (self ) -> str:
        # Repackage prepare_config_and_inputs() into (config, inputs_dict).
        # NOTE(review): the parenthesized tuple target annotated with `: Any`
        # below is not valid Python (annotated assignment targets must be a
        # single name); mangled from a plain tuple unpacking -- confirm.
        __UpperCamelCase : str = self.prepare_config_and_inputs()
        (
            (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) ,
        ) : Any = config_and_inputs
        __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    # NOTE(review): the five class attributes below all share the obfuscated
    # name `A` (each shadows the previous one and the class name); presumably
    # all_model_classes, pipeline_model_mapping and three test_* flags in the
    # original TFConvBertModelTest -- confirm.
    A = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    A = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A = False
    A = False
    A = False

    def a_ (self ) -> Optional[int]:
        # shared fixtures for every test in this class
        __UpperCamelCase : Tuple = TFConvBertModelTester(self )
        __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )

    def a_ (self ) -> Dict:
        self.config_tester.run_common_tests()

    def a_ (self ) -> Dict:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def a_ (self ) -> Optional[int]:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    @slow
    def a_ (self ) -> Any:
        # Round-trip every model class through tf.saved_model and check that
        # hidden states / attentions survive with the expected shapes.
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : int = True
        if hasattr(_UpperCAmelCase , "use_cache" ):
            __UpperCamelCase : List[Any] = True
        __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : int = model_class(_UpperCAmelCase )
            __UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
                __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
                __UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(_UpperCAmelCase )
                if self.is_encoder_decoder:
                    __UpperCamelCase : Any = outputs["encoder_hidden_states"]
                    __UpperCamelCase : Tuple = outputs["encoder_attentions"]
                else:
                    __UpperCamelCase : Tuple = outputs["hidden_states"]
                    __UpperCamelCase : Optional[int] = outputs["attentions"]
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                __UpperCamelCase : Any = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                # ConvBERT halves the effective head count via its head ratio
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def a_ (self ) -> Optional[Any]:
        # from_pretrained smoke test
        __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        # Attention outputs: requested via inputs or config, shape-checked in
        # both encoder-only and encoder-decoder configurations.
        __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )

        def check_decoder_attentions_output(_UpperCAmelCase ):
            __UpperCamelCase : Dict = len(_UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            __UpperCamelCase : List[str] = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(_UpperCAmelCase ):
            __UpperCamelCase : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            # attentions on, hidden states off (via the inputs dict)
            __UpperCamelCase : Any = True
            __UpperCamelCase : Dict = False
            __UpperCamelCase : str = model_class(_UpperCAmelCase )
            __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __UpperCamelCase : List[Any] = len(_UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            if self.is_encoder_decoder:
                __UpperCamelCase : str = model_class(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
                check_decoder_attentions_output(_UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __UpperCamelCase : Optional[Any] = True
            __UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
            __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            # Check attention is always last and order is fine
            __UpperCamelCase : int = True
            __UpperCamelCase : str = True
            __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
            __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def a_ (self ) -> str:
        # Integration test: run the published YituTech/conv-bert-base checkpoint
        # on a fixed token sequence and compare a 3x3 corner of the output
        # against reference values recorded from the original implementation.
        __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
        __UpperCamelCase : Tuple = [1, 6, 7_6_8]
        self.assertEqual(output.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        # loose tolerance: values come from a different framework's forward pass
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
| 298
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
__UpperCamelCase : Optional[Any] = parent
__UpperCamelCase : List[str] = 1_3
__UpperCamelCase : List[Any] = 7
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Tuple = True
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = 9_9
__UpperCamelCase : Union[str, Any] = 3_8_4
__UpperCamelCase : str = 2
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : Any = 3_7
__UpperCamelCase : str = "gelu"
__UpperCamelCase : Optional[Any] = 0.1
__UpperCamelCase : str = 0.1
__UpperCamelCase : str = 5_1_2
__UpperCamelCase : Optional[Any] = 1_6
__UpperCamelCase : Dict = 2
__UpperCamelCase : Optional[int] = 0.02
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : Tuple = 2
__UpperCamelCase : str = 9
__UpperCamelCase : List[Any] = 1
__UpperCamelCase : Any = None
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : str = None
if self.use_input_mask:
__UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : int = None
if self.use_token_type_ids:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Optional[Any] = None
if self.use_labels:
__UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : str = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCamelCase : Optional[Any] = [input_ids, input_mask]
__UpperCamelCase : str = model(_UpperCAmelCase )
__UpperCamelCase : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the MLM head output shape: (batch, seq_len, vocab_size)."""
    model = TFConvBertForMaskedLM(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the sequence-classification head output shape: (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = TFConvBertForSequenceClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the multiple-choice head output shape: (batch, num_choices)."""
    config.num_choices = self.num_choices
    model = TFConvBertForMultipleChoice(config=config)
    # Each input is tiled along a new axis-1 so every choice sees the same sequence.
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the token-classification head output shape: (batch, seq_len, num_labels)."""
    config.num_labels = self.num_labels
    model = TFConvBertForTokenClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the QA head start/end logits shapes: (batch, seq_len) each."""
    model = TFConvBertForQuestionAnswering(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """Re-shape prepare_config_and_inputs() output into (config, inputs_dict) for the common tests."""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class A(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """Common + pipeline test suite for the TF ConvBERT model family.

    NOTE(review): the obfuscated original named every class attribute ``A`` and every
    method ``a_`` — attributes shadowed each other and no test was ever discovered by
    unittest. Names below are restored to match the mixin contract and internal call sites.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Save each model as a TF SavedModel, reload it, and check hidden states / attentions."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBERT uses grouped attention, hence num_attention_heads / 2.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attentions are emitted via inputs and via config, with the expected shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class A(unittest.TestCase):
    """Integration test: compare real-checkpoint outputs against reference values."""

    @slow
    def test_inference_no_head(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        # Reference slice recorded from the original checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 298
| 1
|
"""Per-directory pytest configuration, run automatically by pytest before any tests."""
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared transformers command-line options.

    The hook MUST be named ``pytest_addoption`` for pytest to pick it up.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: optionally write the detailed test reports at session end."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 298
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; was previously bound to a throwaway name.
logger = logging.get_logger()
@dataclass
class Tracker:
    """Records, via forward hooks, the leaf modules executed during one forward pass."""

    module: nn.Module
    traced: list = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A leaf module reports only itself in modules(); conv/bn are kept explicitly.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Remove the hooks so later forward passes are not recorded.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` to ``dest`` by aligning their traced leaf operations."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Run ``x`` through both models and transfer state dicts op-by-op.

        Raises:
            Exception: if the two (filtered) traces differ in length.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Convert one timm ResNet checkpoint to the HF format and optionally push it to the Hub.

    Args:
        name: timm model name (e.g. "resnet50").
        config: target HF ResNet configuration.
        save_directory: local directory used to stage the converted checkpoint.
        push_to_hub: when True, upload the model and image processor.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    # Sanity check: both models must produce the same logits for the same input.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or all of them when ``model_name`` is None).

    Returns:
        (config, expected_shape) for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Every config shares the ImageNet label maps; only depths/widths differ.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298
| 1
|
'''simple docstring'''
from maths.prime_check import is_prime


def __lowerCAmelCase(number: int) -> int:
    """Return the upper twin prime of ``number`` (number + 2) if both are prime, else -1.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def get_setup_file():
    """Return the value of the ``-f`` flag passed on the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    """Load and return the metrics from ``<output_dir>/all_results.json``.

    Raises:
        ValueError: if the results file does not exist.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    """Return True when running on CUDA and NVIDIA apex is installed (enables --fp16)."""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Echo log records to stdout so pytest captures them.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A(SCREAMING_SNAKE_CASE__):
    """End-to-end smoke tests for the PyTorch ``*_no_trainer`` example scripts.

    Each test launches an example through ``accelerate launch`` into a temporary
    directory and checks the metrics written to ``all_results.json``.
    """

    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 298
| 1
|
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffle ``input_string`` into a rail-fence (zigzag) cipher with ``key`` rails.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : int = []
__UpperCamelCase : Union[str, Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
__UpperCamelCase : list[list[str]] = [[] for _ in range(snake_case__ )] # generates template
for position in range(len(snake_case__ ) ):
__UpperCamelCase : Optional[Any] = position % (lowest * 2) # puts it in bounds
__UpperCamelCase : Dict = min(snake_case__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
__UpperCamelCase : Any = 0
for row in temp_grid: # fills in the characters
__UpperCamelCase : Any = input_string[counter : counter + len(snake_case__ )]
grid.append(list(snake_case__ ) )
counter += len(snake_case__ )
__UpperCamelCase : Any = "" # reads as zigzag
for position in range(len(snake_case__ ) ):
__UpperCamelCase : int = position % (lowest * 2) # puts it in bounds
__UpperCamelCase : Union[str, Any] = min(snake_case__ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __lowerCAmelCase ( input_string ):
    """Brute-force a rail-fence ciphertext.

    Tries every key from 1 to ``len(input_string) - 1`` and returns a dict
    mapping each candidate key to its decryption.

    Fixes the dumped version, where the ``results`` dict was never populated
    (the per-key assignment target was lost) and ``return results`` referenced
    an undefined name.
    """
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 298
|
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCAmelCase ( snake_case__ ):
if not isinstance(snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = F"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase ( snake_case__ ):
    # Build a YolosConfig for the requested YOLOS variant (tiny / small / dWr /
    # base) and attach the COCO-detection id2label/label2id maps from the Hub.
    # NOTE(review): not runnable as dumped — the parameter was renamed to
    # `snake_case__` but the body reads `yolos_name`, and every per-variant
    # assignment below lost its original target (presumably config.hidden_size,
    # intermediate_size, num_hidden_layers, num_attention_heads, image_size,
    # qkv_bias, num_labels, id2label, label2id). `return config` is likewise
    # undefined. TODO restore targets before using.
    __UpperCamelCase : Tuple = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        __UpperCamelCase : str = 192
        __UpperCamelCase : Optional[int] = 768
        __UpperCamelCase : Dict = 12
        __UpperCamelCase : List[str] = 3
        __UpperCamelCase : Optional[Any] = [800, 1_333]
        __UpperCamelCase : List[Any] = False
    elif yolos_name == "yolos_s_dWr":
        __UpperCamelCase : Any = 330
        __UpperCamelCase : List[Any] = 14
        __UpperCamelCase : Union[str, Any] = 6
        __UpperCamelCase : Optional[int] = 1_320
    elif "yolos_s" in yolos_name:
        __UpperCamelCase : Union[str, Any] = 384
        __UpperCamelCase : Union[str, Any] = 1_536
        __UpperCamelCase : Union[str, Any] = 12
        __UpperCamelCase : Optional[Any] = 6
    elif "yolos_b" in yolos_name:
        __UpperCamelCase : Tuple = [800, 1_344]
    # COCO detection label metadata (91 classes) fetched from the Hub.
    __UpperCamelCase : List[Any] = 91
    __UpperCamelCase : int = "huggingface/label-files"
    __UpperCamelCase : List[Any] = "coco-detection-id2label.json"
    __UpperCamelCase : Dict = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
    __UpperCamelCase : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
    __UpperCamelCase : Any = idalabel
    __UpperCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
    return config
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCamelCase : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
__UpperCamelCase : List[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : int = in_proj_weight[: config.hidden_size, :]
__UpperCamelCase : Dict = in_proj_bias[: config.hidden_size]
__UpperCamelCase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCamelCase : Tuple = in_proj_weight[-config.hidden_size :, :]
__UpperCamelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase ( snake_case__ ):
if "backbone" in name:
__UpperCamelCase : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
__UpperCamelCase : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
__UpperCamelCase : Optional[Any] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
__UpperCamelCase : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
__UpperCamelCase : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__UpperCamelCase : Dict = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
__UpperCamelCase : Optional[Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
__UpperCamelCase : Dict = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__UpperCamelCase : Any = name.replace("attn" , "attention.self" )
if "norm1" in name:
__UpperCamelCase : Tuple = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__UpperCamelCase : List[Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__UpperCamelCase : Optional[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__UpperCamelCase : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
__UpperCamelCase : List[str] = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
__UpperCamelCase : Dict = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
__UpperCamelCase : Dict = name.replace("vit.norm" , "vit.layernorm" )
return name
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    # Rewrite an original YOLOS state dict into the HF key layout: fused qkv
    # tensors are split into query/key/value slices; all other keys are renamed.
    # NOTE(review): not runnable as dumped — the two parameters share the name
    # `snake_case__` (a SyntaxError) while the body reads `orig_state_dict` and
    # `model`; the sliced q/k/v values and the final `else` assignment lost
    # their dict-key targets (presumably
    # orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention...."]
    # and orig_state_dict[rename_key(key)] = val). TODO restore before use.
    for key in orig_state_dict.copy().keys():
        __UpperCamelCase : Any = orig_state_dict.pop(snake_case__ )
        if "qkv" in key:
            __UpperCamelCase : Optional[Any] = key.split("." )
            __UpperCamelCase : List[Any] = int(key_split[2] )
            # all_head_size gives the per-projection slice width.
            __UpperCamelCase : Tuple = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                __UpperCamelCase : Optional[int] = val[:dim, :]
                __UpperCamelCase : Dict = val[
                    dim : dim * 2, :
                ]
                __UpperCamelCase : Optional[int] = val[-dim:, :]
            else:
                __UpperCamelCase : Optional[int] = val[:dim]
                __UpperCamelCase : Any = val[dim : dim * 2]
                __UpperCamelCase : Union[str, Any] = val[-dim:]
        else:
            __UpperCamelCase : Any = val
    return orig_state_dict
def __lowerCAmelCase ( ):
    """Download and return the standard COCO cats-on-a-couch test image.

    Fixes the dumped version, where the URL assignment target and the
    ``stream=True`` keyword were both replaced with the undefined name
    ``snake_case__``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ):
    # Convert an original YOLOS checkpoint into the HF format: build the config,
    # remap the state dict, verify a 3x3 slice of logits/pred_boxes against
    # hard-coded expected values, save the model + image processor, and
    # optionally push both to the Hub.
    # NOTE(review): not runnable as dumped — the four parameters all share the
    # name `snake_case__` (a SyntaxError) while the body reads `yolos_name`,
    # `pytorch_dump_folder_path` and `push_to_hub`; many assignment targets
    # (config, state_dict, model, size, image_processor, encoding, outputs,
    # logits/pred_boxes, expected_slice_logits/expected_slice_boxes) were lost
    # in the rename. TODO restore targets before use.
    __UpperCamelCase : int = get_yolos_config(snake_case__ )
    # load original state_dict
    __UpperCamelCase : int = torch.load(snake_case__ , map_location="cpu" )["model"]
    # load 🤗 model
    __UpperCamelCase : Tuple = YolosForObjectDetection(snake_case__ )
    model.eval()
    __UpperCamelCase : Optional[int] = convert_state_dict(snake_case__ , snake_case__ )
    model.load_state_dict(snake_case__ )
    # Check outputs on an image, prepared by YolosImageProcessor
    __UpperCamelCase : Tuple = 800 if yolos_name != "yolos_ti" else 512
    __UpperCamelCase : str = YolosImageProcessor(format="coco_detection" , size=snake_case__ )
    __UpperCamelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
    __UpperCamelCase : List[Any] = model(**snake_case__ )
    __UpperCamelCase , __UpperCamelCase : List[Any] = outputs.logits, outputs.pred_boxes
    __UpperCamelCase , __UpperCamelCase : Optional[int] = None, None
    # Per-variant reference slices used as a regression check after conversion.
    if yolos_name == "yolos_ti":
        __UpperCamelCase : List[str] = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        __UpperCamelCase : Optional[int] = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        __UpperCamelCase : int = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        __UpperCamelCase : Union[str, Any] = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        __UpperCamelCase : Union[str, Any] = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        __UpperCamelCase : Any = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        __UpperCamelCase : Optional[Any] = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        __UpperCamelCase : Dict = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        __UpperCamelCase : Dict = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        __UpperCamelCase : Tuple = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , snake_case__ , atol=1E-4 )
    Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
    print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(snake_case__ )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(snake_case__ )
    if push_to_hub:
        # Hub repo names differ from the local variant identifiers.
        __UpperCamelCase : Union[str, Any] = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        __UpperCamelCase : Dict = model_mapping[yolos_name]
        image_processor.push_to_hub(snake_case__ , organization="hustvl" )
        model.push_to_hub(snake_case__ , organization="hustvl" )
if __name__ == "__main__":
    # CLI entry point for the YOLOS checkpoint converter.
    # Fixes the dumped version, which assigned the parser and parsed args to
    # `_lowerCAmelCase` while reading the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
__UpperCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
__UpperCamelCase : str = 1 - (matter_density + radiation_density + dark_energy)
__UpperCamelCase : List[Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    _lowerCAmelCase = 0.3
    # NOTE(review): the call below reads `hubble_parameter` and `matter_density`,
    # but this dump defines the function as `__lowerCAmelCase` and binds the
    # constant to `_lowerCAmelCase` — as written the demo raises NameError.
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 298
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of released MobileNetV2 checkpoints to configs.
# NOTE(review): both bindings were renamed to `_lowerCAmelCase` by the dump, so
# the logger is immediately clobbered by the config map.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
    '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
    '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
    '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
    '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class A ( SCREAMING_SNAKE_CASE__ ):
    """Configuration holding the architecture hyper-parameters of a MobileNetV2
    model (channel count, input resolution, depth multiplier, etc.).

    Fixes the dumped ``__init__`` signature, whose parameters all shared the
    name ``_UpperCAmelCase`` (a SyntaxError); names and defaults are recovered
    from the attribute reads in the body.
    """

    A = "mobilenet_v2"

    def __init__(
        self ,
        num_channels=3 ,
        image_size=2_2_4 ,
        depth_multiplier=1.0 ,
        depth_divisible_by=8 ,
        min_depth=8 ,
        expand_ratio=6 ,
        output_stride=3_2 ,
        first_layer_is_expansion=True ,
        finegrained_output=True ,
        hidden_act="relu6" ,
        tf_padding=True ,
        classifier_dropout_prob=0.8 ,
        initializer_range=0.02 ,
        layer_norm_eps=0.001 ,
        semantic_loss_ignore_index=2_5_5 ,
        **kwargs ,
    ) -> Any:
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A ( SCREAMING_SNAKE_CASE__ ):
    """ONNX export configuration: declares the model's dynamic axes and the
    numeric tolerance used when validating the exported graph."""

    A = version.parse("1.11" )

    @property
    def a_ (self ) -> Mapping[str, Mapping[int, str]]:
        # Single image input; only the batch dimension is dynamic.
        return OrderedDict({"pixel_values": {0: "batch"}} )

    @property
    def a_ (self ) -> Mapping[str, Mapping[int, str]]:
        # Classification exposes logits; the base model exposes hidden states.
        if self.task != "image-classification":
            return OrderedDict({"last_hidden_state": {0: "batch"}, "pooler_output": {0: "batch"}} )
        return OrderedDict({"logits": {0: "batch"}} )

    @property
    def a_ (self ) -> float:
        # Absolute tolerance for output comparison during export validation.
        return 1E-4
| 298
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Source tree and docs locations used by the task-guide checker.
# NOTE(review): these were presumably TRANSFORMERS_PATH and PATH_TO_TASK_GUIDES;
# both names are read later in this file but the dump renamed the bindings to
# `_lowerCAmelCase`.
_lowerCAmelCase = '''src/transformers'''
_lowerCAmelCase = '''docs/source/en/tasks'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
_lowerCAmelCase = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCAmelCase = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( task_guide ):
    """Build the markdown bullet text listing every model usable in ``task_guide``.

    Combines the models registered in the task's auto-mapping with the special
    extra model types, returning one comma-separated line of markdown links.

    Fixes the dumped version, whose local assignment targets
    (``model_maping_names``, ``special_model_types``, ``model_names``) were
    lost even though the body still reads those names.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( task_guide , overwrite=False ):
    """Verify (and optionally rewrite) the auto-generated model list in a task guide.

    Compares the list currently embedded between the generated-tip markers with
    the freshly computed one; rewrites the file when ``overwrite`` is set,
    otherwise raises ``ValueError`` telling the user to run ``make fix-copies``.

    Fixes the dumped version, whose two parameters shared the name
    ``snake_case__`` (a SyntaxError) and whose 4-way tuple-unpack targets and
    path arguments were lost.
    """
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    # CLI entry point: validate every task guide's model list.
    # Fixes the dumped version, which assigned the parser and parsed args to
    # `_lowerCAmelCase` while reading the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
| 1
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __lowerCAmelCase ( ):
    """Exercise ``patch_submodule`` on ``os.path.join``: every access path to the
    patched attribute (including renamed module aliases) must see the mock
    while siblings stay untouched, and everything must revert afterwards.

    Fixes the dumped version, where the ``mock`` assignment target was lost so
    the body referenced an undefined name.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching , "os.path.join" , mock ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def __lowerCAmelCase ( ):
    """Check that a builtin present in the module globals (``open``) is patched
    inside the context manager and restored afterwards.

    Fixes the dumped version, where the ``mock`` assignment target was lost.
    """
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , "open" , mock ):
        assert _test_patching.open is mock
    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def __lowerCAmelCase ( ):
    """Patching an attribute absent from the module (``pandas.read_csv``) must
    be a silent no-op rather than an error.

    Fixes the dumped version, where the ``mock`` assignment target was lost.
    """
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching , "pandas.read_csv" , mock ):
        pass
def __lowerCAmelCase ( ):
    """A builtin not present in the module globals (``len``) must still be
    patched inside the context and resolve back to the real builtin afterwards.

    Fixes the dumped version, where the ``mock`` assignment target and the
    ``None`` default of ``getattr`` were replaced with undefined names.
    """
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , "len" , None ) is None
    with patch_submodule(_test_patching , "len" , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def __lowerCAmelCase ( ):
    """Use the explicit ``start()`` / ``stop()`` API instead of the context
    manager and verify the attribute flips to the mock and back.

    Fixes the dumped version, where both the ``mock`` and ``patch`` assignment
    targets were lost.
    """
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching , "open" , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def __lowerCAmelCase ( ):
    """Nest three patches on the same ``os`` module in both orders and verify
    all three attributes are mocked inside and restored outside.

    Fixes the dumped version, where the ``mock_join`` / ``mock_dirname`` /
    ``mock_rename`` assignment targets were lost.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , "os.path.join" , mock_join ):
        with patch_submodule(_test_patching , "os.rename" , mock_rename ):
            with patch_submodule(_test_patching , "os.path.dirname" , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , "os.rename" , mock_rename ):
        with patch_submodule(_test_patching , "os.path.join" , mock_join ):
            with patch_submodule(_test_patching , "os.path.dirname" , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def __lowerCAmelCase ( ):
    """Patching a completely nonexistent module or attribute must not raise.

    Fixes the dumped version, where the ``mock`` assignment target was lost.
    """
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , mock ):
        pass
    with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , mock ):
        pass
| 298
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
    """Processor wrapping an OwlViT image processor and a CLIP tokenizer into a
    single callable that prepares text queries, query images and target images.

    NOTE(review): not runnable as dumped — every method signature repeats the
    parameter name ``_UpperCAmelCase`` (a SyntaxError), and many local
    assignment targets in ``__init__``/``__call__`` (feature_extractor,
    image_processor, encodings, max_num_queries, input_ids, attention_mask,
    encoding, image_features, query_pixel_values) were lost in the rename.
    TODO restore names before use.
    """

    # Attribute declarations consumed by the ProcessorMixin machinery.
    A = ["image_processor", "tokenizer"]
    A = "OwlViTImageProcessor"
    A = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
        # Accepts the deprecated `feature_extractor` kwarg as a fallback for
        # `image_processor`, then validates both components are present.
        __UpperCamelCase : Tuple = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , _UpperCAmelCase , )
            __UpperCamelCase : str = kwargs.pop("feature_extractor" )

        __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(_UpperCAmelCase , _UpperCAmelCase )

    def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
        # Tokenizes text queries (padding every sample in a batch to the same
        # number of queries), processes query images and/or target images, and
        # stacks the results into the requested tensor framework.
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
                __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]

            elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
                __UpperCamelCase : List[str] = []

                # Maximum number of queries across batch
                __UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_UpperCAmelCase ) != max_num_queries:
                        __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))

                    __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
                    encodings.append(_UpperCAmelCase )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )

            # Concatenate the per-sample encodings along the batch axis in the
            # requested framework (numpy / jax / torch / tensorflow).
            if return_tensors == "np":
                __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            elif return_tensors == "pt" and is_torch_available():
                import torch

                __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )

            else:
                raise ValueError("Target return tensor type could not be returned" )

            __UpperCamelCase : Optional[Any] = BatchEncoding()
            __UpperCamelCase : Union[str, Any] = input_ids
            __UpperCamelCase : List[str] = attention_mask

        if query_images is not None:
            __UpperCamelCase : str = BatchEncoding()
            __UpperCamelCase : Any = self.image_processor(
                _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
            __UpperCamelCase : List[Any] = query_pixel_values

        if images is not None:
            __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )

        # Merge pixel values into the text/query encoding when both are given.
        if text is not None and images is not None:
            __UpperCamelCase : Optional[Any] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            __UpperCamelCase : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # Delegates to the image processor's legacy post-processing.
        return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
        # Delegates object-detection post-processing to the image processor.
        return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # Delegates image-guided detection post-processing to the image processor.
        return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
        # Delegates batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
        # Delegates single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a_ (self ) -> Tuple:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
        return self.image_processor_class

    @property
    def a_ (self ) -> Union[str, Any]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
        return self.image_processor
| 298
| 1
|
'''simple docstring'''
import functools
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
# Validation
if not isinstance(snake_case__ , snake_case__ ) or not all(isinstance(snake_case__ , snake_case__ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(snake_case__ ) != 3 or not all(isinstance(snake_case__ , snake_case__ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(snake_case__ ) == 0:
return 0
if min(snake_case__ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(snake_case__ ) >= 366:
raise ValueError("All days elements should be less than 366" )
__UpperCamelCase : List[str] = set(snake_case__ )
@functools.cache
def dynamic_programming(snake_case__ ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
return "".join([hex(snake_case__ )[2:].zfill(2 ).upper() for byte in list(snake_case__ )] )
def __lowerCAmelCase ( snake_case__ ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case__ ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case__ ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case__ ) , 2 ) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class A ( SCREAMING_SNAKE_CASE__ ):
    """Dataset task template describing an automatic-speech-recognition task.

    NOTE(review): source obfuscation has collapsed all five class attributes
    onto the single name ``A`` — each assignment below overwrites the
    previous one, so only the last ("transcription") survives — and the
    method body references names (``features``, ``self.audio_column``,
    ``self.input_schema``, ``input_schema``, ``task_template``) that this
    class no longer defines, so calling it raises NameError/AttributeError.
    The original attribute names (task, input_schema, label_schema,
    audio_column, transcription_column) need to be restored against the
    upstream `datasets` task template — TODO confirm.
    """
    # Each of these lines rebinds the same class attribute ``A``; see NOTE.
    A = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
    A = Features({"audio": Audio()} )
    A = Features({"transcription": Value("string" )} )
    A = "audio"
    A = "transcription"
    def a_ (self , _UpperCAmelCase ) -> str:
        # Validate that the dataset features contain an Audio column, then
        # return a copy of this template aligned with the dataset's schema.
        # NOTE(review): ``features`` is undefined here — presumably the
        # parameter ``_UpperCAmelCase`` was originally named ``features``.
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features." )
        if not isinstance(features[self.audio_column] , _UpperCAmelCase ):
            raise ValueError(f"Column {self.audio_column} is not an Audio type." )
        # NOTE(review): the four assignments below all write to a throwaway
        # local; the intended targets (copied template / its input schema)
        # were lost in obfuscation, and ``task_template`` is undefined.
        __UpperCamelCase : Any = copy.deepcopy(self )
        __UpperCamelCase : Optional[Any] = self.input_schema.copy()
        __UpperCamelCase : List[Any] = features[self.audio_column]
        __UpperCamelCase : int = input_schema
        return task_template
    @property
    def a_ (self ) -> Dict[str, str]:
        # Map of dataset column name -> template role.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Directories containing the Flax example scripts under test; they are put on
# sys.path so the ``run_*`` modules below can be imported as top-level modules.
_lowerCAmelCase = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        '''text-classification''',
        '''language-modeling''',
        '''summarization''',
        '''token-classification''',
        '''question-answering''',
    ]
]
# Bug fixed: the original extended sys.path with (and then tested) the
# undefined name ``SRC_DIRS`` — the list above had been renamed to
# ``_lowerCAmelCase`` — so importing this module raised NameError.
sys.path.extend(_lowerCAmelCase)
if _lowerCAmelCase is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
# Module-level logger; rebinds the same obfuscated name used for the list
# above, matching the original code's naming.
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
    """Return the value of the ``-f`` command-line flag.

    Shim so the module can be run under pytest, which passes ``-f``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-f" )
    parsed = arg_parser.parse_args()
    return parsed.f
def __lowerCAmelCase ( snake_case__ , snake_case__="eval" ):
__UpperCamelCase : List[str] = os.path.join(snake_case__ , F"{split}_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
return json.load(snake_case__ )
raise ValueError(F"can't find {path}" )
# Mirror all log output to stdout so test runners can capture it.
# Bug fixed: the original referenced the undefined names ``logger`` and
# ``stream_handler`` (both had been renamed to ``_lowerCAmelCase``), raising
# NameError at import time; attach the handler to the root logger instead.
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logging.getLogger().addHandler(_lowerCAmelCase)
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end smoke tests for the Flax example scripts.

    Each test builds a CLI-style argument string, patches ``sys.argv`` with
    it, runs the example's ``main()``, then reads the emitted results JSON
    and asserts a minimum metric.

    NOTE(review): source obfuscation broke these bodies — ``patch.object``
    is called with the undefined name ``_UpperCAmelCase`` (originally,
    presumably, ``sys`` and the testargs list), ``get_results`` likewise
    receives ``_UpperCAmelCase`` (presumably the tmp dir), and the f-strings
    interpolate names like ``tmp_dir``/``epochs`` that were renamed to
    throwaway locals.  Running any test raises NameError — TODO restore the
    original local names against the upstream examples test file.
    """
    def a_ (self ) -> str:
        # run_flax_glue: text classification on the MRPC fixture.
        __UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[str] = f"\n    run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --eval_steps=2\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_flax_glue.main()
            __UpperCamelCase : int = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
    @slow
    def a_ (self ) -> Tuple:
        # run_clm_flax: causal language modeling on the sample text fixture.
        __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Any = f"\n    run_clm_flax.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --block_size 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_clm_flax.main()
            __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
            self.assertLess(result["eval_perplexity"] , 1_0_0 )
    @slow
    def a_ (self ) -> str:
        # run_summarization_flax: summarization on the xsum sample fixture.
        __UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Tuple = f"\n    run_summarization.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --test_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=8\n    --do_train\n    --do_eval\n    --do_predict\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --predict_with_generate\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_summarization_flax.main()
            __UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
            self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
            self.assertGreaterEqual(result["test_rouge2"] , 2 )
            self.assertGreaterEqual(result["test_rougeL"] , 7 )
            self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
    @slow
    def a_ (self ) -> int:
        # run_mlm_flax: masked language modeling on the sample text fixture.
        __UpperCamelCase : int = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : str = f"\n    run_mlm.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --logging_steps 2 --eval_steps 2\n    --do_train\n    --do_eval\n    --num_train_epochs=1\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_mlm_flax.main()
            __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
            self.assertLess(result["eval_perplexity"] , 4_2 )
    @slow
    def a_ (self ) -> Dict:
        # run_ta_mlm_flax: T5 span-corruption pretraining smoke test.
        __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Tuple = f"\n    run_t5_mlm_flax.py\n    --model_name_or_path t5-small\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_ta_mlm_flax.main()
            __UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
    @slow
    def a_ (self ) -> Union[str, Any]:
        # run_flax_ner: token classification on the CoNLL sample fixture.
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        __UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Optional[Any] = f"\n    run_flax_ner.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --do_train\n    --do_eval\n    --warmup_steps=2\n    --learning_rate=2e-4\n    --logging_steps 2 --eval_steps 2\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_flax_ner.main()
            __UpperCamelCase : int = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
            self.assertGreaterEqual(result["eval_f1"] , 0.3 )
    @slow
    def a_ (self ) -> List[Any]:
        # run_qa: question answering on the SQuAD sample fixture.
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Dict = f"\n    run_qa.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=2\n    --do_train\n    --do_eval\n    --logging_steps 2 --eval_steps 2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_qa.main()
            __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_f1"] , 3_0 )
            self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
    """Pipeline tool that generates an English caption for an input image
    using the BLIP image-captioning model.

    NOTE(review): obfuscation collapsed all class attributes onto the single
    name ``A`` (only the last assignment survives) and renamed the
    encode/forward/decode hooks to ``a_`` (only the last ``a_`` survives);
    restoring those names must be confirmed against the upstream tool API.
    """
    A = "Salesforce/blip-image-captioning-base"
    A = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    A = "image_captioner"
    A = AutoModelForVisionaSeq
    A = ["image"]
    A = ["text"]
    def __init__(self , *args , **kwargs ) -> Optional[int]:
        # Bug fixed: the original declared ``*_UpperCAmelCase, **_UpperCAmelCase``
        # (duplicate argument name), which is a SyntaxError in Python.
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def a_ (self , _UpperCAmelCase ) -> List[Any]:
        # Preprocess the input image into model-ready PyTorch tensors.
        return self.pre_processor(images=_UpperCAmelCase , return_tensors="pt" )
    def a_ (self , _UpperCAmelCase ) -> List[str]:
        # Run text generation on the preprocessed inputs.
        return self.model.generate(**_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase ) -> List[Any]:
        # Bug fixed: the original passed the outputs themselves as
        # ``skip_special_tokens`` (a truthy tensor); the flag should be True.
        return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=True )[0].strip()
| 298
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
    """Helper that builds configs and dummy inputs for standalone-TrOCR-decoder
    tests (config creation, input-id generation, past-key-values check).

    NOTE(review): source obfuscation damaged this class beyond execution:
    ``__init__`` declares every parameter as ``_UpperCAmelCase`` (duplicate
    argument names are a SyntaxError), and every ``self.<attr> = ...``
    assignment was rewritten to bind the throwaway local ``__UpperCamelCase``
    instead, so no instance state is ever set.  The defaults (99, 13, 16, 7,
    ...) match the upstream ``TrOCRStandaloneDecoderModelTester`` signature
    (parent, vocab_size, batch_size, d_model, decoder_seq_length, ...), which
    should be used to restore it — TODO confirm against upstream.
    """
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
        # NOTE(review): all right-hand names below (parent, batch_size, ...)
        # are undefined given the broken signature; see class NOTE.
        __UpperCamelCase : List[str] = parent
        __UpperCamelCase : str = batch_size
        __UpperCamelCase : str = decoder_seq_length
        # For common tests
        __UpperCamelCase : Optional[int] = self.decoder_seq_length
        __UpperCamelCase : Any = is_training
        __UpperCamelCase : Tuple = use_attention_mask
        __UpperCamelCase : Optional[int] = use_labels
        __UpperCamelCase : Dict = vocab_size
        __UpperCamelCase : Optional[int] = d_model
        __UpperCamelCase : Union[str, Any] = d_model
        __UpperCamelCase : int = decoder_layers
        __UpperCamelCase : Dict = decoder_layers
        __UpperCamelCase : str = decoder_ffn_dim
        __UpperCamelCase : Optional[Any] = decoder_attention_heads
        __UpperCamelCase : Optional[Any] = decoder_attention_heads
        __UpperCamelCase : List[Any] = eos_token_id
        __UpperCamelCase : int = bos_token_id
        __UpperCamelCase : Tuple = pad_token_id
        __UpperCamelCase : Tuple = decoder_start_token_id
        __UpperCamelCase : Dict = use_cache
        __UpperCamelCase : Optional[Any] = max_position_embeddings
        __UpperCamelCase : int = None
        __UpperCamelCase : Optional[int] = decoder_seq_length
        __UpperCamelCase : Optional[int] = 2
        __UpperCamelCase : Optional[int] = 1
    def a_ (self ) -> List[Any]:
        # Build a TrOCRConfig plus dummy input ids / attention mask / labels.
        __UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        __UpperCamelCase : int = None
        if self.use_attention_mask:
            __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        __UpperCamelCase : List[str] = None
        if self.use_labels:
            __UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        __UpperCamelCase : Optional[Any] = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        # NOTE(review): the tuple below references names lost to obfuscation.
        return (config, input_ids, attention_mask, lm_labels)
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
        # Check that decoding with cached past_key_values produces the same
        # hidden states as recomputing from the full sequence.
        # NOTE(review): duplicate parameter names above are a SyntaxError;
        # presumably (config, input_ids, attention_mask, lm_labels).
        __UpperCamelCase : List[Any] = True
        __UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
        __UpperCamelCase : Optional[Any] = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        __UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = model(_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
        self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
        self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
        __UpperCamelCase : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        __UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        __UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
        __UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
        __UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
        # select random slice
        __UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        __UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
    def a_ (self ) -> Optional[Any]:
        # Repackage prepare_config_and_inputs() for the common test mixins.
        __UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
        __UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Standalone decoder test suite for TrOCR (TrOCRDecoder /
    TrOCRForCausalLM), wired into the common model/generation/pipeline test
    mixins.

    NOTE(review): ``setUp`` below instantiates
    ``TrOCRStandaloneDecoderModelTester``, a name that no longer exists in
    this file (the tester class was renamed to ``A`` by obfuscation), and
    passes the undefined ``_UpperCAmelCase`` as ``config_class``
    (presumably ``TrOCRConfig``) — running these tests raises NameError.
    """
    # Model classes exercised when torch is available.
    A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    A = (TrOCRForCausalLM,) if is_torch_available() else ()
    A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    A = True
    A = False
    def a_ (self ) -> List[str]:
        # Build the model tester and config tester; see class NOTE about the
        # undefined names referenced here.
        __UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
        __UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )
    def a_ (self ) -> Dict:
        # Intentionally skipped (inherited test not applicable).
        pass
    def a_ (self ) -> Optional[int]:
        # Intentionally skipped (inherited test not applicable).
        pass
    def a_ (self ) -> Optional[Any]:
        # Intentionally skipped (inherited test not applicable).
        pass
    def a_ (self ) -> Dict:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()
    def a_ (self ) -> List[Any]:
        # Exercise the cached-decoding equivalence check from the tester.
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
    def a_ (self ) -> Any:
        # Decoder-only model: nothing to check for encoder/decoder attributes.
        return
    @unittest.skip("The model doesn't support left padding" )  # and it's not used enough to be worth fixing :)
    def a_ (self ) -> Tuple:
        pass
| 298
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase ( snake_case__ ):
    """Build the ``DetaConfig`` for the given DETA model name.

    ``snake_case__`` is the model name (e.g. ``"deta-swin-large-o365"``);
    names containing "o365" get the Objects365 label set (366 classes), all
    others get COCO detection labels (91 classes).

    Bug fixed: the obfuscated original referenced the undefined names
    ``model_name``, ``num_labels`` and ``idalabel`` and discarded the
    computed label mappings into throwaway locals instead of setting
    ``num_labels`` / ``id2label`` / ``label2id`` on the config (restored per
    the upstream DETA conversion script, including the boolean
    ``assign_first_stage`` / ``with_box_refine`` / ``two_stage`` flags).
    """
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in snake_case__:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = dct.pop(snake_case__ )
__UpperCamelCase : str = val
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__UpperCamelCase : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__UpperCamelCase : Optional[Any] = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
__UpperCamelCase : Optional[int] = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : Any = in_proj_weight[:dim, :]
__UpperCamelCase : Optional[Any] = in_proj_bias[: dim]
__UpperCamelCase : Tuple = in_proj_weight[
dim : dim * 2, :
]
__UpperCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
__UpperCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
__UpperCamelCase : Any = in_proj_bias[-dim :]
# fmt: on
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
# transformer decoder self-attention layers
__UpperCamelCase : List[str] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__UpperCamelCase : List[str] = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__UpperCamelCase : List[str] = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : int = in_proj_weight[:hidden_size, :]
__UpperCamelCase : Union[str, Any] = in_proj_bias[:hidden_size]
__UpperCamelCase : Union[str, Any] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__UpperCamelCase : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
__UpperCamelCase : Tuple = in_proj_weight[-hidden_size:, :]
__UpperCamelCase : Tuple = in_proj_bias[-hidden_size:]
def __lowerCAmelCase ( ):
    """Download and return the standard COCO validation image used to verify
    the conversion.

    Bug fixed: the original referenced the undefined name ``snake_case__``
    (this function takes no parameters) both as the URL and as the
    ``stream`` flag, raising NameError when called.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url , stream=True ).raw )
@torch.no_grad()
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Convert a DETA checkpoint to the HuggingFace format and verify it.

    Presumably ``(model_name, pytorch_dump_folder_path, push_to_hub)`` — TODO
    confirm against the argparse block below.

    NOTE(review): damaged by automated renaming — all three parameters share
    the name ``snake_case__`` (a SyntaxError) and the body references
    ``model_name``/``state_dict``/``model``/``processor`` etc., which no
    longer exist.  The original control flow is preserved below for reference.
    """
    __UpperCamelCase : Optional[int] = get_deta_config(snake_case__ )
    # load original state dict
    if model_name == "deta-swin-large":
        __UpperCamelCase : Optional[int] = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
    elif model_name == "deta-swin-large-o365":
        __UpperCamelCase : int = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
    else:
        raise ValueError(F"Model name {model_name} not supported" )
    __UpperCamelCase : Optional[int] = torch.load(snake_case__ , map_location="cpu" )["model"]
    # original state dict
    for name, param in state_dict.items():
        print(snake_case__ , param.shape )
    # rename keys
    __UpperCamelCase : Union[str, Any] = create_rename_keys(snake_case__ )
    for src, dest in rename_keys:
        rename_key(snake_case__ , snake_case__ , snake_case__ )
    read_in_swin_q_k_v(snake_case__ , config.backbone_config )
    read_in_decoder_q_k_v(snake_case__ , snake_case__ )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            __UpperCamelCase : Union[str, Any] = state_dict.pop(snake_case__ )
            __UpperCamelCase : List[str] = val
        if "input_proj" in key:
            __UpperCamelCase : Any = state_dict.pop(snake_case__ )
            __UpperCamelCase : Optional[Any] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            __UpperCamelCase : List[Any] = state_dict.pop(snake_case__ )
            __UpperCamelCase : Tuple = val
    # finally, create HuggingFace model and load state dict
    __UpperCamelCase : Dict = DetaForObjectDetection(snake_case__ )
    model.load_state_dict(snake_case__ )
    model.eval()
    __UpperCamelCase : Any = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(snake_case__ )
    # load image processor
    __UpperCamelCase : Tuple = DetaImageProcessor(format="coco_detection" )
    # verify our conversion on image
    __UpperCamelCase : Dict = prepare_img()
    __UpperCamelCase : Union[str, Any] = processor(images=snake_case__ , return_tensors="pt" )
    __UpperCamelCase : str = encoding["pixel_values"]
    __UpperCamelCase : str = model(pixel_values.to(snake_case__ ) )
    # verify logits against values captured from the original implementation
    print("Logits:" , outputs.logits[0, :3, :3] )
    print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        __UpperCamelCase : Any = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        __UpperCamelCase : Optional[int] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        __UpperCamelCase : Tuple = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        __UpperCamelCase : str = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__ ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__ ) , atol=1E-4 )
    print("Everything ok!" )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
        Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
        model.save_pretrained(snake_case__ )
        processor.save_pretrained(snake_case__ )
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub..." )
        model.push_to_hub(F"jozhang97/{model_name}" )
        processor.push_to_hub(F"jozhang97/{model_name}" )
# CLI entry point for the DETA conversion script.
# NOTE(review): automated renaming broke this block — the parser is assigned
# to ``_lowerCAmelCase`` but used as ``parser``, and the parsed namespace as
# ``args``; both are undefined as written.
if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_name''',
        type=str,
        default='''deta-swin-large''',
        choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
        help='''Name of the model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        help='''Path to the folder to output PyTorch model.''',
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    _lowerCAmelCase = parser.parse_args()
    convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 298
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Guard the supported fairseq version window: [0.12.2, 2.0).
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
    raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
    raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
# NOTE(review): renaming collapsed three distinct module constants (the
# logger, a sample sentence, and a sample language code) onto the single name
# ``_lowerCAmelCase`` — only the last assignment survives at runtime, and the
# code below references ``SAMPLE_LANGUAGE``, which no longer exists.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Convert a fairseq X-MOD checkpoint to the HuggingFace format.

    Presumably ``(xmod_checkpoint_path, pytorch_dump_folder_path,
    classification_head)`` — TODO confirm against the argparse block below.

    NOTE(review): damaged by automated renaming — all three parameters share
    the name ``snake_case__`` (a SyntaxError) and the body references the
    original local names (``xmod``, ``model``, ``config`` …), which no longer
    exist.  The weight-copy sequence is preserved below for reference.
    """
    __UpperCamelCase : Union[str, Any] = Path("data_bin" )
    __UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval() # disable dropout
    print(snake_case__ )
    __UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
    # Build an equivalent HF config from the fairseq model's hyperparameters.
    __UpperCamelCase : Optional[int] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , snake_case__ )
    __UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
    __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
    __UpperCamelCase : str = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    __UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
    __UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        __UpperCamelCase : int = model.roberta.encoder.layer[i]
        __UpperCamelCase : Any = xmod_sent_encoder.layers[i]
        # self attention
        __UpperCamelCase : List[str] = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        __UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
        __UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
        __UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
        __UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
        __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
        __UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        __UpperCamelCase : Optional[int] = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
        __UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
        __UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
        __UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        __UpperCamelCase : Dict = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        __UpperCamelCase : List[Any] = xmod_layer.fca.weight
        __UpperCamelCase : Optional[int] = xmod_layer.fca.bias
        # output
        __UpperCamelCase : List[Any] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        __UpperCamelCase : Tuple = xmod_layer.fca.weight
        __UpperCamelCase : int = xmod_layer.fca.bias
        __UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
        __UpperCamelCase : int = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            __UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
            __UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            __UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
            __UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
            __UpperCamelCase : int = from_adapter.fca.weight
            __UpperCamelCase : Dict = from_adapter.fca.bias
            __UpperCamelCase : List[Any] = from_adapter.fca.weight
            __UpperCamelCase : int = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        __UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
        __UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        __UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
        __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
        __UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
        __UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        __UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
        __UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
        __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
        __UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
        __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
        __UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    __UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(snake_case__ )
    __UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
    if classification_head:
        __UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
    else:
        __UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    __UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
    __UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(snake_case__ )
# CLI entry point for the X-MOD conversion script.
# NOTE(review): renaming assigned the parser to ``_lowerCAmelCase`` but the
# code uses ``parser``/``args`` — both undefined as written.
if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    _lowerCAmelCase = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 298
| 1
|
'''simple docstring'''
class A :
    """A node of a radix (compressed prefix) tree over strings.

    Each node stores a string ``prefix`` shared by every word in its subtree,
    a leaf flag marking that the tree contains the word ending here, and a
    dict of children keyed by the first character of each child's prefix.

    NOTE(review): this class is damaged by automated renaming — ``__init__``
    repeats the parameter name ``_UpperCAmelCase`` (a SyntaxError), every
    method is named ``a_`` (so later definitions shadow earlier ones), and
    bodies reference the original names (``word``, ``prefix``, ``is_leaf``,
    ``incoming_node`` …) that no longer exist.  The original structure
    (match / insert_many / insert / find / delete / print_tree) is preserved
    below for reference; restore real names before use.
    """
    def __init__(self , _UpperCAmelCase = "" , _UpperCAmelCase = False ) -> None:
        # Mapping from the first character of the prefix of the node
        __UpperCamelCase : dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        __UpperCamelCase : Union[str, Any] = is_leaf
        __UpperCamelCase : int = prefix
    def a_ (self , _UpperCAmelCase ) -> tuple[str, str, str]:
        # match(word): longest common prefix of self.prefix and word, then
        # the unmatched remainders of prefix and word, in that order.
        __UpperCamelCase : Optional[int] = 0
        for q, w in zip(self.prefix , _UpperCAmelCase ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def a_ (self , _UpperCAmelCase ) -> None:
        # insert_many(words): insert every word in the iterable.
        for word in words:
            self.insert(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase ) -> None:
        # insert(word)
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            __UpperCamelCase : List[Any] = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            __UpperCamelCase : Optional[int] = RadixNode(prefix=_UpperCAmelCase , is_leaf=_UpperCAmelCase )
        else:
            __UpperCamelCase : int = self.nodes[word[0]]
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = incoming_node.match(
                _UpperCAmelCase )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(_UpperCAmelCase )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                __UpperCamelCase : int = remaining_prefix
                __UpperCamelCase : List[str] = self.nodes[matching_string[0]]
                __UpperCamelCase : List[str] = RadixNode(_UpperCAmelCase , _UpperCAmelCase )
                __UpperCamelCase : Optional[int] = aux_node
                if remaining_word == "":
                    __UpperCamelCase : Union[str, Any] = True
                else:
                    self.nodes[matching_string[0]].insert(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase ) -> bool:
        # find(word): True iff the tree contains word as a stored entry.
        __UpperCamelCase : Any = self.nodes.get(word[0] , _UpperCAmelCase )
        if not incoming_node:
            return False
        else:
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Dict = incoming_node.match(
                _UpperCAmelCase )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase ) -> bool:
        # delete(word): remove word, merging single-child nodes afterwards;
        # returns False when the word was not present.
        __UpperCamelCase : Tuple = self.nodes.get(word[0] , _UpperCAmelCase )
        if not incoming_node:
            return False
        else:
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = incoming_node.match(
                _UpperCAmelCase )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(_UpperCAmelCase )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            __UpperCamelCase : Any = list(self.nodes.values() )[0]
                            __UpperCamelCase : Tuple = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            __UpperCamelCase : Dict = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        __UpperCamelCase : Union[str, Any] = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        __UpperCamelCase : Dict = list(incoming_node.nodes.values() )[0]
                        __UpperCamelCase : Any = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        __UpperCamelCase : str = merging_node.nodes
                    return True
    def a_ (self , _UpperCAmelCase = 0 ) -> None:
        # print_tree(height): pretty-print the subtree, one dash per depth level.
        if self.prefix != "":
            print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def __lowerCAmelCase ( ):
    """Smoke-test the radix tree: insert a word list, then exercise find/delete.

    NOTE(review): renaming broke the references — ``RadixNode`` (class is now
    ``A``), ``root``, ``words`` and ``snake_case__`` are undefined as written.
    """
    __UpperCamelCase : Optional[Any] = "banana bananas bandana band apple all beast".split()
    __UpperCamelCase : Optional[int] = RadixNode()
    root.insert_many(snake_case__ )
    # every inserted word must be found; near-misses must not
    assert all(root.find(snake_case__ ) for word in words )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    # deleting "banana" must not remove the distinct word "bananas"
    assert root.find("bananas" )
    return True
def __lowerCAmelCase ( ):
    """Run the radix-tree smoke test and fail loudly on a falsy result.

    NOTE(review): ``test_trie`` is undefined — the function above was renamed
    to ``__lowerCAmelCase`` by the obfuscation.
    """
    assert test_trie()
def __lowerCAmelCase ( ):
    """Demo entry point: build a radix tree from sample words and print it.

    NOTE(review): ``RadixNode``, ``root``, ``words`` and ``snake_case__`` are
    undefined as written (automated-renaming damage).
    """
    __UpperCamelCase : Dict = RadixNode()
    __UpperCamelCase : Union[str, Any] = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(snake_case__ )
    print("Words:" , snake_case__ )
    print("Tree:" )
    root.print_tree()
# Script entry point.
# NOTE(review): ``main`` is undefined — the demo function above was renamed
# to ``__lowerCAmelCase`` by the obfuscation.
if __name__ == "__main__":
    main()
| 298
|
'''simple docstring'''
def __lowerCAmelCase(snake_case__):
    """Return every variant of the input string with exactly one letter uppercased.

    For each index that holds an alphabetic character, produce a copy of the
    string with only that character converted to uppercase.  Non-alphabetic
    positions are skipped, so the result has one entry per letter, in index
    order.

    Args:
        snake_case__: the input string.

    Returns:
        list[str]: the single-letter-uppercased variants (empty for a string
        with no letters).
    """
    # Bug fix: the body referenced ``txt``, which no longer existed after the
    # parameter was renamed, raising NameError on every call.  Bind the
    # parameter locally under the name the body uses (keeps the public
    # signature unchanged).
    txt = snake_case__
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    # Run the module's doctests when the file is executed directly.
    import doctest

    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): renaming collapsed the module logger and the
# pretrained-config archive map onto the single name ``_lowerCAmelCase``;
# only the dict survives at runtime.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
    '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A ( SCREAMING_SNAKE_CASE__ ):
    """Configuration class for a GLPN (depth-estimation) model.

    Stores encoder block/stage hyperparameters and decoder sizes.

    NOTE(review): damaged by automated renaming — the base class
    ``SCREAMING_SNAKE_CASE__`` (presumably ``PretrainedConfig``) is undefined,
    ``__init__`` repeats the parameter name ``_UpperCAmelCase`` (a
    SyntaxError), the class attribute ``A`` shadows the class name, and the
    body references the original keyword names (``num_channels`` …) that no
    longer exist.
    """
    A = "glpn"
    def __init__(self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[2, 2, 2, 2] , _UpperCAmelCase=[8, 4, 2, 1] , _UpperCAmelCase=[3_2, 6_4, 1_6_0, 2_5_6] , _UpperCAmelCase=[7, 3, 3, 3] , _UpperCAmelCase=[4, 2, 2, 2] , _UpperCAmelCase=[1, 2, 5, 8] , _UpperCAmelCase=[4, 4, 4, 4] , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1E-6 , _UpperCAmelCase=6_4 , _UpperCAmelCase=1_0 , _UpperCAmelCase=-1 , **_UpperCAmelCase , ) -> str:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = num_channels
        __UpperCamelCase : List[str] = num_encoder_blocks
        __UpperCamelCase : List[str] = depths
        __UpperCamelCase : Optional[Any] = sr_ratios
        __UpperCamelCase : Tuple = hidden_sizes
        __UpperCamelCase : Optional[Any] = patch_sizes
        __UpperCamelCase : Union[str, Any] = strides
        __UpperCamelCase : Optional[int] = mlp_ratios
        __UpperCamelCase : str = num_attention_heads
        __UpperCamelCase : Dict = hidden_act
        __UpperCamelCase : Optional[Any] = hidden_dropout_prob
        __UpperCamelCase : Tuple = attention_probs_dropout_prob
        __UpperCamelCase : int = initializer_range
        __UpperCamelCase : Union[str, Any] = drop_path_rate
        __UpperCamelCase : Optional[Any] = layer_norm_eps
        __UpperCamelCase : List[str] = decoder_hidden_size
        __UpperCamelCase : Dict = max_depth
        __UpperCamelCase : Optional[int] = head_in_index
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Count ordered combinations of ``array`` items summing to ``target``
    (combination-sum IV), via naive exponential recursion.

    NOTE(review): damaged by automated renaming — all three parameters share
    the name ``snake_case__`` (a SyntaxError) and the inner function reads
    ``target``/``array``, which no longer exist.
    """
    def count_of_possible_combinations(snake_case__ ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Count ordered combinations summing to ``target`` using top-down
    memoization (``-1`` marks an uncomputed entry in ``dp_array``).

    NOTE(review): damaged by automated renaming — duplicate parameter names
    (a SyntaxError) and references to the original locals
    (``target``/``array``/``dp_array``/``answer``), which no longer exist.
    """
    def count_of_possible_combinations_with_dp_array(
        snake_case__ , snake_case__ ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        __UpperCamelCase : Any = sum(
            count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
            for item in array )
        __UpperCamelCase : List[str] = answer
        return answer
    __UpperCamelCase : Optional[int] = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Count ordered combinations summing to ``target``, bottom-up DP in
    O(target * len(array)).

    NOTE(review): damaged by automated renaming — duplicate parameter names
    (a SyntaxError) and references to ``target``/``array``/``dp_array``,
    which no longer exist.
    """
    __UpperCamelCase : Optional[int] = [0] * (target + 1)
    # exactly one way to reach sum 0: pick nothing
    __UpperCamelCase : Tuple = 1
    for i in range(1 , target + 1 ):
        for j in range(snake_case__ ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
# Demo entry point: run doctests, then print the combination count for a
# small example.
# NOTE(review): the three constants were all renamed to ``_lowerCAmelCase``,
# so ``n``/``array``/``target`` below are undefined as written.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    _lowerCAmelCase = 3
    _lowerCAmelCase = 5
    _lowerCAmelCase = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 298
| 1
|
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __lowerCAmelCase ( snake_case__ ):
    """Drop fairseq bookkeeping keys (version markers, tied output projection,
    position-embedding buffers) from a checkpoint state dict, in place.

    NOTE(review): renaming broke the body — ``ignore_keys`` and ``state_dict``
    are undefined; the list built on the first line was clearly meant to be
    iterated, popping each key from the parameter with a ``None`` default so
    missing keys are ignored.
    """
    __UpperCamelCase : Tuple = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ ):
    """Build a bias-free ``nn.Linear`` sharing an embedding's weight matrix
    (the usual weight-tying trick for output projections).

    NOTE(review): renaming broke the body — ``emb`` and ``lin_layer`` are
    undefined as written; the parameter is presumably the embedding module.
    """
    __UpperCamelCase , __UpperCamelCase : Dict = emb.weight.shape
    __UpperCamelCase : List[str] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
    __UpperCamelCase : Tuple = emb.weight.data
    return lin_layer
def __lowerCAmelCase ( snake_case__ , snake_case__=None ):
    """Translate fairseq NLLB-MoE state-dict keys to HuggingFace naming.

    NOTE(review): damaged by automated renaming — duplicate parameter names
    (a SyntaxError) and references to ``state_dict``/``expert_idx``/
    ``new_dict``, which no longer exist.  Also note the latent precedence bug
    ``if "fc2" and "experts" not in key:`` — the literal ``"fc2"`` is always
    truthy, so the condition only tests ``"experts" not in key``; presumably
    ``"fc2" in key and ...`` was intended (same below for ``"fc1"``) — verify
    against the upstream conversion script.
    """
    __UpperCamelCase : Dict = {}
    for old_key in state_dict.keys():
        __UpperCamelCase : str = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                __UpperCamelCase : Any = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" )
            else:
                __UpperCamelCase : List[Any] = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            __UpperCamelCase : Dict = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" and "experts" not in key:
            __UpperCamelCase : Union[str, Any] = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" and "experts" not in key:
            __UpperCamelCase : str = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            __UpperCamelCase : List[Any] = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            __UpperCamelCase : Optional[int] = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            __UpperCamelCase : int = key.replace("final_layer_norm" , "ff_layer_norm" )
        __UpperCamelCase : Tuple = state_dict[old_key]
    return new_dict
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = WEIGHTS_NAME ):
    """Shard an NLLB-MoE checkpoint on the fly: one shard per expert rank plus
    a final shard of shared (non-expert) weights, and build the HF weight
    index (``metadata`` + ``weight_map``).

    NOTE(review): damaged by automated renaming — all positional parameters
    share the name ``snake_case__`` (a SyntaxError) and the body references
    the original locals (``switch_checkpoint_path``, ``dump_path``,
    ``num_experts`` …), which no longer exist.  Control flow preserved below
    for reference.
    """
    __UpperCamelCase : str = []
    __UpperCamelCase : List[str] = 0
    os.makedirs(snake_case__ , exist_ok=snake_case__ )
    for expert in range(snake_case__ ):
        __UpperCamelCase : Optional[int] = switch_checkpoint_path + F"-rank-{expert}.pt"
        if os.path.isfile(snake_case__ ):
            __UpperCamelCase : Optional[int] = torch.load(snake_case__ )["model"]
            remove_ignore_keys_(snake_case__ )
            __UpperCamelCase : Optional[Any] = rename_fairseq_keys(snake_case__ , snake_case__ )
            __UpperCamelCase : Union[str, Any] = os.path.join(
                snake_case__ , weights_name.replace(".bin" , F"-{len(snake_case__ )+1:05d}-of-???.bin" ) )
            torch.save(snake_case__ , snake_case__ )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(snake_case__ )[0]].dtype )
    # Add the last block
    __UpperCamelCase : Tuple = os.path.join(snake_case__ , weights_name.replace(".bin" , F"-{len(snake_case__ )+1:05d}-of-???.bin" ) )
    __UpperCamelCase : Any = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(snake_case__ )
    __UpperCamelCase : Optional[Any] = rename_fairseq_keys(snake_case__ , snake_case__ )
    __UpperCamelCase : int = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(snake_case__ ) == 1:
        __UpperCamelCase : List[Any] = os.path.join(snake_case__ , snake_case__ )
        torch.save(snake_case__ , snake_case__ )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(snake_case__ , snake_case__ )
    # Otherwise, let's build the index
    __UpperCamelCase : Dict = {}
    for idx, shard in enumerate(snake_case__ ):
        __UpperCamelCase : int = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin" )
        __UpperCamelCase : int = os.path.join(snake_case__ , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
        # rename the provisional "-of-???" shard files now that the total is known
        os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
        for key in shard:
            __UpperCamelCase : Optional[Any] = shard_file
    # Add the metadata
    __UpperCamelCase : Tuple = {"total_size": total_size}
    __UpperCamelCase : List[str] = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(snake_case__ , snake_case__ ) , "w" , encoding="utf-8" ) as f:
        __UpperCamelCase : List[str] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + "\n"
        f.write(snake_case__ )
    return metadata, index
# CLI entry point: shard the raw NLLB-MoE checkpoint, save an HF config, then
# reload the converted model as a sanity check.
# NOTE(review): renaming collapsed ``parser``/``args``/``metadata``/``index``/
# ``config``/``model`` onto ``_lowerCAmelCase``, so the usages below are
# undefined as written.
if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--nllb_moe_checkpoint_path''',
        default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
        type=str,
        required=False,
        help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
    )
    parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
        type=str,
        required=False,
        help='''Path to the output pytorch model.''',
    )
    _lowerCAmelCase = parser.parse_args()
    _lowerCAmelCase , _lowerCAmelCase = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    _lowerCAmelCase = NllbMoeConfig.from_pretrained(
        '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    _lowerCAmelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print('''Done''')
    model.save_pretrained(args.pytorch_dump_folder_path)
| 298
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Put the repo's ``src`` directory ahead of any installed copy so tests run
# against the current checkout.
# NOTE(review): renaming assigned the path to ``_lowerCAmelCase`` but it is
# used as ``git_repo_path`` on the next line — undefined as written.
_lowerCAmelCase = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __lowerCAmelCase(snake_case__):
    """pytest hook: register the shared transformers CLI options on *snake_case__*.

    Imported lazily so merely importing this conftest does not require
    ``transformers``.
    """
    import transformers.testing_utils as _testing_utils

    _testing_utils.pytest_addoption_shared(snake_case__)
def __lowerCAmelCase ( snake_case__ ):
    """pytest hook: emit the transformers report files at the end of a test run
    when ``--make-reports`` was passed.

    NOTE(review): renaming broke the body — ``terminalreporter`` and
    ``make_reports`` are undefined; the parameter is presumably the pytest
    terminal reporter, and the option value read on the first line was meant
    to gate and name the report.
    """
    from transformers.testing_utils import pytest_terminal_summary_main
    __UpperCamelCase : int = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(snake_case__ , id=snake_case__ )
| 298
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
    """Output container for the semantic Stable Diffusion pipeline.

    NOTE(review): damaged by automated renaming — the base class
    ``SCREAMING_SNAKE_CASE__`` (presumably ``BaseOutput``) is undefined, and
    the two annotated fields (presumably ``images`` and
    ``nsfw_content_detected``) were both collapsed onto the name ``A``, so the
    second silently overwrites the first.
    """
    A = 42
    A = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 298
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class A ( unittest.TestCase ):
    """Helper/tester that generates BridgeTower image-processor configurations
    and computes the expected post-resize output dimensions.

    NOTE(review): damaged by automated renaming — ``__init__`` repeats the
    parameter name ``_UpperCAmelCase`` (a SyntaxError), both helper methods
    are named ``a_`` (the second shadows the first), and bodies reference the
    original locals (``parent``, ``do_resize``, ``batched``, ``image`` …)
    that no longer exist.  Structure preserved below for reference.
    """
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 3_2 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _UpperCAmelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _UpperCAmelCase = True , _UpperCAmelCase=7 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=3 , ) -> Dict:
        __UpperCamelCase : Dict = parent
        __UpperCamelCase : Any = do_resize
        __UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_8_8}
        __UpperCamelCase : Any = size_divisor
        __UpperCamelCase : Optional[int] = do_rescale
        __UpperCamelCase : Union[str, Any] = rescale_factor
        __UpperCamelCase : int = do_normalize
        __UpperCamelCase : List[Any] = do_center_crop
        __UpperCamelCase : Optional[int] = image_mean
        __UpperCamelCase : Tuple = image_std
        __UpperCamelCase : Tuple = do_pad
        __UpperCamelCase : Tuple = batch_size
        __UpperCamelCase : Dict = num_channels
        __UpperCamelCase : Dict = min_resolution
        __UpperCamelCase : Optional[Any] = max_resolution
    def a_ (self ) -> Optional[int]:
        # prepare_image_processor_dict: kwargs forwarded to the processor ctor.
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
        # get_expected_values: replicate the processor's shortest-edge resize
        # (capped at 1333/800 * size) and size_divisor rounding; in batched
        # mode, take the per-image maxima over height and width.
        if not batched:
            __UpperCamelCase : List[str] = self.size["shortest_edge"]
            __UpperCamelCase : Optional[int] = image_inputs[0]
            if isinstance(_UpperCAmelCase , Image.Image ):
                __UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size
            else:
                __UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
            __UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase )
            if h < w:
                __UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w
            else:
                __UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size
            __UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size )
            if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size:
                __UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase )
                __UpperCamelCase : Dict = newh * scale
                __UpperCamelCase : Union[str, Any] = neww * scale
                __UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
            __UpperCamelCase , __UpperCamelCase : Optional[int] = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            __UpperCamelCase : int = []
            for image in image_inputs:
                __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
            __UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A(SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """Test suite for BridgeTowerImageProcessor (PIL, numpy and torch inputs)."""

    # Obfuscation damage repaired: the attribute is read as
    # ``self.image_processing_class`` throughout the test bodies.
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        # Tester supplies the processor kwargs and expected output shapes.
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """Every configured attribute must exist on the instantiated processor."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        # Intentionally empty in the original suite (batch feature covered elsewhere).
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 298
| 1
|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Demo configuration. These names are read below (load_models / load_indexes);
# the obfuscated ``_lowerCAmelCase`` targets left them undefined.
MODEL_TYPE = "bart"  # generator backbone: "bart" or the t5 fallback
LOAD_DENSE_INDEX = True  # whether to load the dense FAISS retrieval index (needs GPU + data files)
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the question-embedding (retriever) and answer-generation models.

    Restored name: the module-level unpack below calls ``load_models()``.
    Returns (qar_tokenizer, qar_model, sas_tokenizer, sas_model); the retriever
    pair is (None, None) when LOAD_DENSE_INDEX is off.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages, the dense FAISS GPU index, and the ES client.

    Restored name: the module-level unpack below calls ``load_indexes()``.
    The dense pieces are (None, None) when LOAD_DENSE_INDEX is off.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        # GPU 1 hosts the index; GPU 0 hosts the models.
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a dense index over its question embeddings.

    Restored name: the module-level unpack below calls ``load_train_data()``.
    """
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Module-level resources shared by the retrieval/generation helpers below.
# Restored target names: the helper functions read these globals.
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the ``n_results`` ELI5 training examples nearest to ``question``.

    Restored name: the button handler below calls ``find_nearest_training``.
    Uses the dense question index built in load_train_data().
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def __lowerCAmelCase ( snake_case__ , snake_case__="wiki40b" , snake_case__="dense" , snake_case__=10 ):
if source == "none":
__UpperCamelCase , __UpperCamelCase : Dict = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCamelCase , __UpperCamelCase : List[Any] = query_qa_dense_index(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
__UpperCamelCase , __UpperCamelCase : Optional[Any] = query_es_index(
snake_case__ , snake_case__ , index_name="english_wiki40b_snippets_100w" , n_results=snake_case__ , )
__UpperCamelCase : Tuple = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
__UpperCamelCase : List[str] = "question: {} context: {}".format(snake_case__ , snake_case__ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate a long-form answer for ``question_doc`` with the seq2seq model.

    Restored name: the button handler below calls ``answer_question``.
    NOTE(review): the second returned element reads the module-level
    ``support_list`` set by the button handler, not a local — preserved as-is,
    but worth confirming this reliance on Streamlit's top-to-bottom rerun.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer,
            sas_model,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar — render the HF logo inside a styled container.
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

# Which view the main pane shows; index into this list drives the branches below.
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    # Defaults when the options box is collapsed: show everything, full passages.
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options

The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults; may be overridden by the "Generation options" box below.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options

The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        # Sampling mode: nucleus/temperature control, beams unused.
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text — canned questions plus a free-text escape hatch.
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, capped at 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            # Build a Wikipedia link per passage, with per-section anchors when available.
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        # Keep the top answer plus any others scoring above 2.
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 298
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Conversion only works against the exact versions it was written for.
if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sentence used to compare original vs converted model outputs.
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    """Convert the original (MXNET/Gluonnlp) Bort checkpoint to the HF BERT structure.

    Restored name: the ``__main__`` block below invokes
    ``convert_bort_checkpoint_to_pytorch`` — the obfuscated def name broke that call.

    :param bort_checkpoint_path: path to the official Bort params file.
    :param pytorch_dump_folder_path: output folder for the converted PyTorch model.
    """
    # Original Bort configuration (4 layers, 8 heads, 768 hidden, 1024 embed).
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()

    # Parameter mapping (Gluonnlp -> Transformers), * = layer index:
    #   encoder.layer_norm.{beta,gamma}        -> bert.embeddings.LayerNorm.{bias,weight}
    #   encoder.position_weight                -> bert.embeddings.position_embeddings.weight
    #   word_embed.0.weight                    -> bert.embeddings.word_embeddings.weight
    #   encoder.transformer_cells.*.attention_cell.proj_{key,query,value} -> ...attention.self.{key,query,value}
    #   encoder.transformer_cells.*.proj       -> ...attention.output.dense
    #   encoder.transformer_cells.*.layer_norm -> ...attention.output.LayerNorm
    #   encoder.transformer_cells.*.ffn.ffn_1  -> ...intermediate.dense
    #   encoder.transformer_cells.*.ffn.ffn_2  -> ...output.dense
    #   encoder.transformer_cells.*.ffn.layer_norm -> ...output.LayerNorm

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class A :
'''simple docstring'''
A = BlenderbotSmallConfig
A = {}
A = "gelu"
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=2_0 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ) -> Optional[Any]:
__UpperCamelCase : Optional[Any] = parent
__UpperCamelCase : Tuple = batch_size
__UpperCamelCase : Union[str, Any] = seq_length
__UpperCamelCase : List[Any] = is_training
__UpperCamelCase : str = use_labels
__UpperCamelCase : Optional[Any] = vocab_size
__UpperCamelCase : List[Any] = hidden_size
__UpperCamelCase : List[str] = num_hidden_layers
__UpperCamelCase : List[Any] = num_attention_heads
__UpperCamelCase : str = intermediate_size
__UpperCamelCase : Optional[Any] = hidden_dropout_prob
__UpperCamelCase : Tuple = attention_probs_dropout_prob
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : List[Any] = pad_token_id
__UpperCamelCase : Dict = bos_token_id
def a_ (self ) -> List[Any]:
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase : int = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase : Union[str, Any] = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
    """Check that decoding with a past-key-values cache matches full decoding.

    Bug fix: restored all local names (the original assigned to throwaway
    names and read unbound ones), fixed the `tf.inta` typo to `tf.int8`, and
    passed `use_cache=True` explicitly.
    """
    model = TFBlenderbotSmallModel(config=config).get_decoder()
    input_ids = inputs_dict["input_ids"]

    # Use a single example to keep the forward passes cheap.
    input_ids = input_ids[:1, :]
    attention_mask = inputs_dict["attention_mask"][:1, :]
    head_mask = inputs_dict["head_mask"]
    self.batch_size = 1

    # first forward pass
    outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

    output, past_key_values = outputs.to_tuple()

    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

    # append to next input_ids and
    next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
    next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

    output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
    output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

    self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

    # select random slice
    random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
    output_from_past_slice = output_from_past[:, :, random_slice_idx]

    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the standard dict of model inputs, deriving any mask not given.

    Bug fix: the original signature repeated the same parameter name eight
    times (a SyntaxError) and the body read unbound names; parameters are
    restored and `tf.inta` is corrected to `tf.int8`.
    """
    if attention_mask is None:
        # Attend to every non-pad encoder token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder token (decoder_start_token) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF BlenderbotSmall.

    Bug fix: the original class used an undefined name for its base mixins and
    assigned every class attribute to the same name ``A`` (so only the last
    survived), and its test methods were not named ``test_*``.
    NOTE(review): the mixin base names and ``BlenderbotSmallConfig`` below are
    restored from the standard transformers test layout — confirm they match
    this file's imports.
    """

    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    """Slow integration test generating from the 90M BlenderbotSmall checkpoint.

    Bug fix: the original assigned both class attributes to ``A`` and named the
    properties ``a_``, so ``self.src_text``, ``self.tokenizer`` and
    ``self.model`` used in the test were all undefined.
    """

    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 298
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a flat string feature.

    Bug fix: restored the class name used by the tests below (``DummyBeamDataset``),
    the builder hook names (``_info``/``_split_generators``/``_build_pcollection``),
    and the duplicated/unbound parameter names in the original.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # This dummy dataset has no supervised keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with a nested (sequence) feature.

    Bug fix: restored the class name used by the tests below
    (``NestedBeamDataset``), the builder hook names, and the
    duplicated/unbound parameter names in the original.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # This dummy dataset has no supervised keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset.

    Bug fix: restored the function name — the builders and tests in this file
    call ``get_test_dummy_examples``, which the original never defined.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset.

    Bug fix: restored the function name — the builders and tests in this file
    call ``get_test_nested_examples``, which the original never defined.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for beam-based dataset builders on the DirectRunner.

    Bug fix: the original inherited from an undefined name, assigned every
    local to a throwaway name while reading unbound ``builder``/``dset``/
    ``expected_num_examples``, and checked the same shard file twice in the
    sharded test.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while still performing the real write.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Bug fix: check the second shard too (the original checked shard 0 twice).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 298
| 1
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optionally `bias`) tensors into a torch layer.

    Bug fix: the original signature used one parameter name three times
    (a SyntaxError) while the body read ``torch_layer``/``weight``/``bias``;
    the name ``set_param`` is what the rest of this script calls.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH-attention weights (query_key, value, output) into torch.

    Bug fix: duplicated parameter names (SyntaxError) replaced by the names the
    body needs; the name matches the call in ``set_block_weights_in_torch``.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (query, key, value, output) into torch.

    Bug fix: duplicated parameter names (SyntaxError) replaced by the names the
    body needs; the name matches the call in ``set_block_weights_in_torch``.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention + feed-forward) into torch.

    Bug fix: duplicated parameter names (SyntaxError) and unbound local names
    (``layer_norm_a`` etc. were read but never assigned) restored.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention has 3 weight groups, local attention has 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load the full trax Reformer weight tree into a torch ReformerModelWithLMHead.

    Bug fix: duplicated parameter names (SyntaxError), unbound locals, and an
    ``isinstance`` check against an undefined name (restored to ``tuple`` —
    trax stores axial position embeddings as a tuple of weight groups).
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # Each HF layer corresponds to 4 consecutive trax weight groups.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint to a PyTorch state dict file.

    Bug fix: duplicated parameter names (SyntaxError) restored, and the
    function renamed to the name the ``__main__`` block below actually calls.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        # NOTE: pickle.load on an untrusted checkpoint can execute arbitrary code —
        # only convert checkpoints you trust.
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Bug fix: the original bound the parser and parsed args to a throwaway
    # name and then read undefined `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 298
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for the `accelerate test` command.

    Bug fix: the original named its parameter one thing and read another
    (`subparsers` was unbound), defaulted --config_file to an undefined name,
    and wired `func=` to the wrong object; restored to `None` default and
    `func=test_command`.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the bundled test script through `accelerate-launch`.

    Bug fix: the original named its parameter one thing and read `args`, and
    bound each intermediate value to a throwaway name while reading
    `script_name`/`test_args`/`result`.
    """
    # Path to <package_root>/test_utils/scripts/test_script.py relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """CLI entry point: parse args and run the accelerate test command.

    Bug fix: the original called `test_command_parser` (undefined under the
    mangled name) and passed an unbound name to `test_command`.
    """
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 298
| 1
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        # Bug fix: the original read an undefined name as the range start; one
        # single-shard range per job is `range(i, i + 1)`.
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """Shards are split across at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """gen_kwargs are split into one dict per job, sharding only list values.

    Bug fix: the original signature used one parameter name three times
    (a SyntaxError) and the body read unbound names.
    """
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        # Two list arguments of different lengths is an error.
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """The shard count is the length of the (unique) list-valued kwarg.

    Bug fix: the original duplicated its parameter names (SyntaxError) and
    passed an undefined name to `pytest.raises`.
    """
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 298
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BlenderbotSmall.

    Bug fix: the original inherited from an undefined name, assigned both
    class attributes to ``A``, named ``setUp`` and the mixin hooks ``a_``
    (so the mixin could not find them), and read unbound vocab/merges names.
    """

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        # A trailing "." tokenizes the same standalone as at the end of a sentence.
        assert encoded[-1] == encoded_dot[0]
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
# Bug fix: the type variable must be bound to the name `T` — the class below
# uses `Generic[T]` and `T | None`, which were undefined in the original.
T = TypeVar("T")
class SegmentTree(Generic[T]):
    """Iterative segment tree over `arr` combining values with `fnc`.

    The tree is stored in a flat list `st` of size 2*N: leaves occupy
    st[N:2N], and internal node p aggregates its children st[2p] and st[2p+1].

    Bug fix: the original wrote internal-node updates to a throwaway local
    instead of `self.st[p]`, and its methods were all named `a_` (shadowing
    each other) while the demo code below calls `update` and `query`.
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # Slots [0, N) are internal nodes, filled by build(); [N, 2N) are leaves.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill all internal nodes bottom-up."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set leaf p (0-based) to v and refresh its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Aggregate the inclusive leaf range [l, r]; None for an empty range."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    # Bug fix: the original assigned every value to `_lowerCAmelCase` while the
    # code below read `test_array`, `test_updates` and the three trees, and the
    # checker function referenced an undefined loop bound.
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every [i, j] tree query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply point updates and re-verify every range.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 298
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Bug fix: every module constant was assigned to the same throwaway name
# `_lowerCAmelCase`, leaving only the last value reachable; restored the
# conventional transformers constant names.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + activation block used throughout TF RegNet.

    Bug fix: the original never bound its sublayers to `self` (assigning them
    to a throwaway local), referenced nonexistent `tf.keras.layers.ZeroPaddingaD`
    / `ConvaD` (restored to `ZeroPadding2D` / `Conv2D`), and passed an
    undefined value for `use_bias` (restored to False).
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """Stem of the network: validates channels and applies the first conv.

    Bug fix: the original assigned `num_channels` and the embedder to a
    throwaway local instead of `self`, and its forward method was not named
    `call`.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 convolution + batch norm projecting the residual to match shape/stride.

    Bug fix: the original never bound its sublayers to `self`, used the
    nonexistent `tf.keras.layers.ConvaD`, passed an undefined `use_bias`
    value (restored to False), and its forward method was not named `call`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excite: global pool, bottleneck convs, channel-wise rescale.

    Bug fix: the original never bound the pooler/attention layers to `self`,
    used the nonexistent `GlobalAveragePoolingaD`/`ConvaD` names, passed an
    undefined `keepdims` value (restored to True), and its forward method was
    not named `call`.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class A ( tf.keras.layers.Layer ):
    """RegNet X layer: residual block of 1x1 -> grouped 3x3 -> 1x1
    convolutions (ResNeXt-style bottleneck with grouped convolution).

    Fixes: duplicate parameter names (SyntaxError) and every attribute
    (`self.shortcut`, `self.layers`, `self.activation`) plus the
    `should_apply_shortcut`/`groups` locals bound to throwaway names, so the
    forward pass could never run.
    """

    def __init__(self , config , in_channels , out_channels , stride = 1 , **kwargs ) -> None:
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            # final 1x1 is linear; the activation is applied after the residual add
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.2" ),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def a_ (self , hidden_state ) -> tf.Tensor:
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class A ( tf.keras.layers.Layer ):
    """RegNet Y layer: an X layer with a Squeeze-and-Excitation block between
    the grouped 3x3 convolution and the final 1x1 projection.

    Fixes: duplicate parameter names (SyntaxError) and attributes/locals
    bound to throwaway names, mirroring the X-layer defects.
    """

    def __init__(self , config , in_channels , out_channels , stride = 1 , **kwargs ) -> None:
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name="layer.1" ),
            # SE gate squeezes to in_channels/4 before re-expanding
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name="layer.3" ),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def a_ (self , hidden_state ) -> tf.Tensor:
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class A ( tf.keras.layers.Layer ):
    """A RegNet stage: `depth` stacked X or Y layers; the first layer
    changes channel count and (optionally) downsamples.

    Fixes: duplicate parameter names (SyntaxError) and the layer list bound
    to a throwaway local instead of `self.layers`.
    """

    def __init__(self , config , in_channels , out_channels , stride = 2 , depth = 2 , **kwargs ) -> None:
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name="layers.0" ),
            *[layer(config , out_channels , out_channels , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
        ]

    def a_ (self , hidden_state ) -> tf.Tensor:
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class A ( tf.keras.layers.Layer ):
    """Full RegNet encoder: the sequence of stages defined by
    `config.hidden_sizes` / `config.depths`.

    Fixes: `self.stages` bound to a throwaway local before being appended
    to, the stage-size `zip` never captured, and `hidden_state` /
    `hidden_states` never updated inside the forward loop.
    """

    def __init__(self , config , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=f"stages.{i+1}" ) )

    def a_ (self , hidden_state , output_hidden_states = False , return_dict = True ) -> TFBaseModelOutputWithNoAttention:
        """Run all stages, optionally collecting every intermediate feature map."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class A ( tf.keras.layers.Layer ):
    """Core RegNet network: embedder -> encoder -> global average pooling.

    Fixes: `keras_serializable` requires the layer to expose its config
    class as `config_class` (it was bound to the name `A`); every attribute
    and forward-pass local was bound to a throwaway name, so nothing was
    ever set or propagated.
    """

    config_class = RegNetConfig

    def __init__(self , config , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name="embedder" )
        self.encoder = TFRegNetEncoder(config , name="encoder" )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name="pooler" )

    @unpack_inputs
    def a_ (self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        """Embed, encode and pool `pixel_values`; outputs are returned in NCHW."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """Abstract base handling weight initialization and pretrained-checkpoint
    download/loading for RegNet models.

    Fixes: the three class attributes were all assigned to the single name
    `A` (so only the last survived) — the PreTrainedModel machinery reads
    them as `config_class`, `base_model_prefix` and `main_input_name`.
    `tf.floataa` is not a real dtype; the intended one is `tf.float32`.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def a_ (self ) -> dict:
        # dummy input signature used to build/trace the network
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.float32 )}
_lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """RegNet backbone without a task head; thin wrapper around the main layer.

    Fixes: the inner layer and the forward result were bound to throwaway
    locals, and two decorator arguments referenced an undefined name — the
    inputs docstring constant defined above and the model output class are
    substituted (NOTE(review): confirm against upstream).
    """

    def __init__(self , config , *inputs , **kwargs ) -> None:
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def a_ (self , pixel_values , output_hidden_states = None , return_dict = None , training=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """RegNet with a linear classification head over the pooled features.

    Fixes: attributes and forward-pass locals were bound to throwaway names,
    the forward signature repeated one parameter name (a SyntaxError), and
    the `output_type` decorator argument referenced an undefined name —
    substituted with the classifier output class (NOTE(review): confirm
    against upstream).
    """

    def __init__(self , config , *inputs , **kwargs ) -> None:
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name="regnet" )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_lowerCAmelCase )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def a_ (self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , training=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        # labels are optional; loss is only computed when they are provided
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 298
| 1
|
'''simple docstring'''
def __lowerCAmelCase ( num ):
    """Return True iff ``num`` is a palindromic integer.

    Negative numbers are never palindromes (the leading sign has no mirror).
    Fix: the parameter was named ``snake_case__`` while the body read ``num``
    (an immediate NameError) — the parameter is renamed to match the body.
    Doctests are added because ``doctest.testmod()`` runs on import as a
    script but this module previously contained no doctests.

    >>> __lowerCAmelCase(121)
    True
    >>> __lowerCAmelCase(123)
    False
    >>> __lowerCAmelCase(0)
    True
    >>> __lowerCAmelCase(-101)
    False
    """
    if num < 0:
        return False
    num_copy: int = num
    rev_num: int = 0
    # peel off the last digit each iteration, building the reversed number
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
# Run the doctests embedded in this module when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 298
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCAmelCase ( x ):
    """Per-row Shannon entropy of softmax(x).

    Uses the identity
    ``H(softmax(x)) = log(sum_i exp(x_i)) - (sum_i x_i * exp(x_i)) / (sum_i exp(x_i))``.

    Fixes: every intermediate was bound to one throwaway local while the
    code read ``exp_x``, ``A`` and ``B`` (all undefined), and the first sum
    reduced the raw logits instead of their exponentials. The parameter is
    renamed to ``x`` to match the body.

    Args:
        x: tensor of logits, shape (batch, num_classes).
    Returns:
        1-D tensor of entropies, one per row.
    """
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
class A ( nn.Module ):
    """DeeBERT encoder: a stack of `BertLayer`s where each layer is paired
    with a `BertHighway` off-ramp that can exit early at inference time when
    the off-ramp's prediction entropy drops below a per-layer threshold.

    NOTE(review): this block looks machine-mangled and cannot run as-is —
    `__init__` takes `_UpperCAmelCase` but reads `config`; every attribute
    and local is bound to the single throwaway name `__UpperCamelCase`, so
    `self.layer`, `self.highway`, `self.early_exit_entropy`,
    `hidden_states`, `all_hidden_states`, `all_attentions` etc. are never
    actually set; the three methods below all share the name `a_` (later
    defs shadow earlier ones — presumably `set_early_exit_entropy`,
    `init_highway_pooler` and `forward`); and the last method's signature
    repeats one parameter name, which is a SyntaxError. Restore from the
    upstream DeeBERT sources before shipping.
    """
    def __init__(self , _UpperCAmelCase ) -> Union[str, Any]:
        super().__init__()
        # NOTE(review): `config` is not a parameter here — presumably
        # `_UpperCAmelCase` was `config`.
        __UpperCamelCase : Any = config.output_attentions
        __UpperCamelCase : Dict = config.output_hidden_states
        __UpperCamelCase : Union[str, Any] = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
        __UpperCamelCase : Tuple = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
        # one entropy threshold per layer; -1 disables early exit at that layer
        __UpperCamelCase : Optional[int] = [-1 for _ in range(config.num_hidden_layers )]
    def a_ (self , _UpperCAmelCase ) -> int:
        # Set the early-exit entropy threshold(s): a scalar applies to every
        # layer, otherwise a per-layer list is expected.
        # NOTE(review): `x` is not defined here — presumably `_UpperCAmelCase`.
        if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                __UpperCamelCase : str = x
        else:
            __UpperCamelCase : List[Any] = x
    def a_ (self , _UpperCAmelCase ) -> str:
        # Copy the (trained) main pooler weights into every highway pooler.
        # NOTE(review): reads `pooler`/`loaded_model`, neither defined here.
        __UpperCamelCase : Tuple = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    # NOTE(review): duplicate parameter names below are a SyntaxError; the
    # upstream signature is (hidden_states, attention_mask, head_mask,
    # encoder_hidden_states, encoder_attention_mask).
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> List[Any]:
        __UpperCamelCase : Optional[Any] = ()
        __UpperCamelCase : Tuple = ()
        __UpperCamelCase : Dict = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                __UpperCamelCase : Tuple = all_hidden_states + (hidden_states,)
            __UpperCamelCase : Optional[int] = layer_module(
                _UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : Tuple = layer_outputs[0]
            if self.output_attentions:
                __UpperCamelCase : Optional[Any] = all_attentions + (layer_outputs[1],)
            __UpperCamelCase : Any = (hidden_states,)
            if self.output_hidden_states:
                __UpperCamelCase : Any = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                __UpperCamelCase : int = current_outputs + (all_attentions,)
            __UpperCamelCase : Optional[int] = self.highway[i](_UpperCAmelCase )
            # logits, pooled_output
            if not self.training:
                __UpperCamelCase : Dict = highway_exit[0]
                __UpperCamelCase : Any = entropy(_UpperCAmelCase )
                __UpperCamelCase : str = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                __UpperCamelCase : Optional[Any] = all_highway_exits + (highway_exit,)
                # early exit is implemented as an exception carrying the
                # off-ramp output and the exit layer index
                if highway_entropy < self.early_exit_entropy[i]:
                    __UpperCamelCase : str = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(_UpperCAmelCase , i + 1 )
            else:
                __UpperCamelCase : Optional[int] = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            __UpperCamelCase : int = all_hidden_states + (hidden_states,)
        __UpperCamelCase : Dict = (hidden_states,)
        if self.output_hidden_states:
            __UpperCamelCase : Union[str, Any] = outputs + (all_hidden_states,)
        if self.output_attentions:
            __UpperCamelCase : Optional[int] = outputs + (all_attentions,)
        __UpperCamelCase : List[Any] = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """DeeBERT model: standard BERT embeddings + pooler around the
    early-exiting `DeeBertEncoder`.

    NOTE(review): machine-mangled like the encoder above — `__init__` reads
    `config` which is not a parameter; attributes (`self.config`,
    `self.embeddings`, `self.encoder`, `self.pooler`) and forward locals are
    bound to the throwaway name `__UpperCamelCase`; the five methods all
    share the name `a_` (presumably `init_highway_pooler`,
    `get_input_embeddings`, `set_input_embeddings`, `_prune_heads`,
    `forward`); and the forward signature repeats one parameter name (a
    SyntaxError). Restore from upstream before shipping.
    """
    def __init__(self , _UpperCAmelCase ) -> Dict:
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = config
        __UpperCamelCase : Dict = BertEmbeddings(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = DeeBertEncoder(_UpperCAmelCase )
        __UpperCamelCase : str = BertPooler(_UpperCAmelCase )
        self.init_weights()
    def a_ (self ) -> Any:
        # seed every highway pooler with the main pooler's weights
        self.encoder.init_highway_pooler(self.pooler )
    def a_ (self ) -> Optional[int]:
        return self.embeddings.word_embeddings
    def a_ (self , _UpperCAmelCase ) -> Dict:
        # NOTE(review): `value` is not defined and the assignment target is a
        # local — presumably `self.embeddings.word_embeddings = value`.
        __UpperCamelCase : int = value
    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # NOTE(review): reads `heads_to_prune`, which is not a parameter here.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ) -> Union[str, Any]:
        # mutually exclusive input forms: token ids or precomputed embeddings
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            __UpperCamelCase : Tuple = input_ids.size()
        elif inputs_embeds is not None:
            __UpperCamelCase : Optional[int] = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        __UpperCamelCase : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
        # default masks/segment ids when the caller does not provide them
        if attention_mask is None:
            __UpperCamelCase : int = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if encoder_attention_mask is None:
            __UpperCamelCase : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if token_type_ids is None:
            __UpperCamelCase : Optional[Any] = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        __UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            __UpperCamelCase : Tuple = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            __UpperCamelCase : Any = encoder_attention_mask[:, None, None, :]
        __UpperCamelCase : List[Any] = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        # additive mask: masked positions get a large negative bias
        __UpperCamelCase : Dict = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        __UpperCamelCase : Dict = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
        __UpperCamelCase : Optional[int] = self.embeddings(
            input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = self.encoder(
            _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
        __UpperCamelCase : Union[str, Any] = encoder_outputs[0]
        __UpperCamelCase : Any = self.pooler(_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class A ( SCREAMING_SNAKE_CASE__ ):
    """Exception used for control flow by DeeBERT: raised by an off-ramp to
    abort the remaining layers, carrying the off-ramp output (`message`) and
    the 1-based index of the exiting layer (`exit_layer`).

    Fixes: the original signature declared the same parameter name twice
    (a SyntaxError) and bound both values to locals instead of instance
    attributes, so `e.message` / `e.exit_layer` raised AttributeError.
    """

    def __init__(self , message , exit_layer ) -> None:
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class A ( nn.Module ):
    """A DeeBERT highway off-ramp: pools the hidden states of an intermediate
    layer and classifies them, so inference can stop early.

    Fixes: the sub-modules and every forward local were bound to the single
    throwaway name `__UpperCamelCase`, so `self.pooler`, `self.dropout`,
    `self.classifier` and the intermediate values were never set.
    """

    def __init__(self , config ) -> None:
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )

    def a_ (self , encoder_outputs ):
        """Return (logits, pooled_output) for the given encoder outputs tuple."""
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    """DeeBERT sequence classifier: final-layer classification head plus
    per-layer highway losses, with early exit handled via HighwayException.

    NOTE(review): machine-mangled — attributes and forward locals are bound
    to the throwaway name `__UpperCamelCase` (so `self.num_labels`,
    `self.num_layers`, `self.bert`, `self.dropout`, `self.classifier`,
    `outputs`, `logits`, `loss`, … are never actually set), and the forward
    signature repeats one parameter name (a SyntaxError). Restore from the
    upstream DeeBERT sources before shipping.
    """
    def __init__(self , _UpperCAmelCase ) -> Any:
        super().__init__(_UpperCAmelCase )
        __UpperCamelCase : List[Any] = config.num_labels
        __UpperCamelCase : List[Any] = config.num_hidden_layers
        __UpperCamelCase : Optional[int] = DeeBertModel(_UpperCAmelCase )
        __UpperCamelCase : List[str] = nn.Dropout(config.hidden_dropout_prob )
        __UpperCamelCase : str = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ) -> int:
        __UpperCamelCase : int = self.num_layers
        try:
            # normal full forward pass through all layers
            __UpperCamelCase : Tuple = self.bert(
                _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            __UpperCamelCase : str = outputs[1]
            __UpperCamelCase : List[Any] = self.dropout(_UpperCAmelCase )
            __UpperCamelCase : Dict = self.classifier(_UpperCAmelCase )
            __UpperCamelCase : Tuple = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an off-ramp fired: its message carries the partial outputs and
            # the 1-based layer index where inference stopped
            __UpperCamelCase : int = e.message
            __UpperCamelCase : Optional[Any] = e.exit_layer
            __UpperCamelCase : Optional[int] = outputs[0]
        if not self.training:
            __UpperCamelCase : Optional[int] = entropy(_UpperCAmelCase )
            __UpperCamelCase : Optional[Any] = []
            __UpperCamelCase : Any = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                __UpperCamelCase : List[str] = MSELoss()
                __UpperCamelCase : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                __UpperCamelCase : Dict = CrossEntropyLoss()
                __UpperCamelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            __UpperCamelCase : List[Any] = []
            for highway_exit in outputs[-1]:
                __UpperCamelCase : Union[str, Any] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(_UpperCAmelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    __UpperCamelCase : Union[str, Any] = MSELoss()
                    __UpperCamelCase : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    __UpperCamelCase : Optional[Any] = CrossEntropyLoss()
                    __UpperCamelCase : List[str] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(_UpperCAmelCase )
            if train_highway:
                # train only the off-ramps (final highway excluded)
                __UpperCamelCase : int = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                __UpperCamelCase : Dict = (loss,) + outputs
        if not self.training:
            __UpperCamelCase : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                __UpperCamelCase : int = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 298
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Paths of the flax example scripts, added to sys.path so the run_* modules
# can be imported directly by the tests below.
# NOTE(review): the list is bound to `_lowerCAmelCase`, yet the next lines
# read `SRC_DIRS` — a NameError at import time; presumably the constant was
# named SRC_DIRS. Likewise the logger below is bound to `_lowerCAmelCase`
# but used as `logger` elsewhere in this file.
_lowerCAmelCase = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        '''text-classification''',
        '''language-modeling''',
        '''summarization''',
        '''token-classification''',
        '''question-answering''',
    ]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
# capture everything the example scripts log
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
    """Return the value of the ``-f`` flag that pytest/IDE runners pass on
    the command line (used to locate the setup file)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-f" )
    return arg_parser.parse_args().f
def __lowerCAmelCase ( output_dir , split="eval" ):
    """Load ``{split}_results.json`` from ``output_dir`` and return it as a dict.

    Fixes: the original signature declared the same parameter name twice
    (a SyntaxError), and the body read `split` and `path` while the file
    path was bound to a throwaway local — the parameters and local are
    renamed to match the names the body actually uses.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir , f"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(f"can't find {path}" )
# Route the test logger's output to stdout so pytest captures it.
# NOTE(review): the handler is bound to `_lowerCAmelCase` but the next line
# reads `stream_handler` (a NameError) — presumably the original name.
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end smoke tests for the flax example scripts: each test builds
    a CLI argument string, patches sys.argv, runs the script's main(), and
    asserts on the metrics written to `{split}_results.json`.

    NOTE(review): machine-mangled — in every method the temp dir and the
    arg list are bound to the throwaway name `__UpperCamelCase`, yet the
    f-strings read `{tmp_dir}`/`{epochs}` and the assertions read `result`
    (NameErrors); `patch.object(_UpperCAmelCase, "argv", _UpperCAmelCase)`
    was presumably `patch.object(sys, "argv", testargs)`, and `get_results`
    refers to the helper defined above (renamed by the mangling). Restore
    before shipping.
    """
    def a_ (self ) -> str:
        # text classification (GLUE/MRPC fixture)
        __UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[str] = f"\n    run_glue.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --eval_steps=2\n    --warmup_steps=2\n    --seed=42\n    --max_seq_length=128\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_flax_glue.main()
            __UpperCamelCase : int = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
    @slow
    def a_ (self ) -> Tuple:
        # causal language modeling
        __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Any = f"\n    run_clm_flax.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --block_size 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_clm_flax.main()
            __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
            self.assertLess(result["eval_perplexity"] , 1_0_0 )
    @slow
    def a_ (self ) -> str:
        # summarization (checks ROUGE on the test split)
        __UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Tuple = f"\n    run_summarization.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --test_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=8\n    --do_train\n    --do_eval\n    --do_predict\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --predict_with_generate\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_summarization_flax.main()
            __UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
            self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
            self.assertGreaterEqual(result["test_rouge2"] , 2 )
            self.assertGreaterEqual(result["test_rougeL"] , 7 )
            self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
    @slow
    def a_ (self ) -> int:
        # masked language modeling
        __UpperCamelCase : int = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : str = f"\n    run_mlm.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --logging_steps 2 --eval_steps 2\n    --do_train\n    --do_eval\n    --num_train_epochs=1\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_mlm_flax.main()
            __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
            self.assertLess(result["eval_perplexity"] , 4_2 )
    @slow
    def a_ (self ) -> Dict:
        # T5 span-corruption MLM
        __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Tuple = f"\n    run_t5_mlm_flax.py\n    --model_name_or_path t5-small\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --do_train\n    --do_eval\n    --max_seq_length 128\n    --per_device_train_batch_size 4\n    --per_device_eval_batch_size 4\n    --num_train_epochs 2\n    --logging_steps 2 --eval_steps 2\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_ta_mlm_flax.main()
            __UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
    @slow
    def a_ (self ) -> Union[str, Any]:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        __UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Optional[Any] = f"\n    run_flax_ner.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --do_train\n    --do_eval\n    --warmup_steps=2\n    --learning_rate=2e-4\n    --logging_steps 2 --eval_steps 2\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_flax_ner.main()
            __UpperCamelCase : int = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
            self.assertGreaterEqual(result["eval_f1"] , 0.3 )
    @slow
    def a_ (self ) -> List[Any]:
        # extractive question answering (SQuAD v2 fixture)
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Dict = f"\n    run_qa.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --overwrite_output_dir\n    --num_train_epochs=3\n    --warmup_steps=2\n    --do_train\n    --do_eval\n    --logging_steps 2 --eval_steps 2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_qa.main()
            __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_f1"] , 3_0 )
            self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Well-known file names and cache locations used by the diffusers package.
# NOTE(review): every constant is bound to the same name `_lowerCAmelCase`
# (only the last assignment survives) — looks like mechanical renaming damage;
# the right-hand sides are preserved unchanged.
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE
_lowerCAmelCase = '''config.json'''
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''
_lowerCAmelCase = '''model.onnx'''
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''
_lowerCAmelCase = '''weights.pb'''
_lowerCAmelCase = '''https://huggingface.co'''
# Fix: `default_cache_path` is never defined in this module, so the original
# line raised NameError on import; the default cache is the Hub cache constant.
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE
_lowerCAmelCase = '''diffusers_modules'''
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
_lowerCAmelCase = ['''fp16''', '''non-ema''']
_lowerCAmelCase = '''.self_attn'''
| 298
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
# Renamed from the collapsed `_lowerCAmelCase` bindings: the helper functions
# below read `logger` and the three registry dicts under these names.
logger = logging.get_logger(__name__)

# format_type -> Formatter subclass (None is the "python objects" default).
_FORMAT_TYPES: Dict[Optional[str], type] = {}
# alias -> canonical format_type.
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
# alias -> error to raise when the backend is not installed.
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Register ``formatter_cls`` under ``format_type`` and its aliases.

    Fixes the original definition, which repeated the parameter name
    ``snake_case__`` (a SyntaxError) and bound results to an unused local;
    the call sites below already use the name ``_register_formatter``.
    Overwriting an existing registration is allowed but logged.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    # The canonical name is also registered as an alias of itself.
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    """Record the error to re-raise when the format of an uninstalled backend is
    requested. Fixes the original definition's duplicated ``snake_case__``
    parameters (a SyntaxError) and wrong assignment target.
    """
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
# Optional backends: register the formatter when importable, otherwise store an
# error that get_formatter() re-raises on demand. The three error bindings are
# renamed from `_lowerCAmelCase` to the names the calls actually read.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
    _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
    _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
    _register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. ``"np"``) to its canonical format type.

    Fixes the original definition, whose sole parameter was named
    ``snake_case__`` while the body read ``format_type`` (NameError); the
    caller below already uses the name ``get_format_type_from_alias``.
    Unknown values are returned unchanged.
    """
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    return format_type
def get_formatter(format_type: Optional[str], **format_kwargs):
    """Instantiate the Formatter registered for ``format_type`` (alias-aware).

    Fixes the original definition's duplicated ``snake_case__`` parameters
    (a SyntaxError) and the result being bound to an unused local.

    Raises:
        The stored backend error when the format's backend is not installed,
        or ValueError for an unknown format type.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(ft for ft in _FORMAT_TYPES.keys() if ft is not None )}, but got '{format_type}'" )
| 298
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
    '''simple docstring'''

    # Config-and-input builder for the TF ConvBERT model tests.
    # NOTE(review): throughout this class, results are bound to `__UpperCamelCase`
    # while later code reads the original names (`self.batch_size`, `result`,
    # `input_ids`, ...) that are never assigned — this looks like mechanical
    # renaming damage; confirm each method against the upstream ConvBERT tests.

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
        # NOTE(review): the duplicated `_UpperCAmelCase` parameter names are a
        # SyntaxError in Python; the hyper-parameters below are hard-coded and
        # the constructor arguments are effectively ignored.
        __UpperCamelCase : Optional[Any] = parent
        __UpperCamelCase : List[str] = 1_3
        __UpperCamelCase : List[Any] = 7
        __UpperCamelCase : List[str] = True
        __UpperCamelCase : Optional[Any] = True
        __UpperCamelCase : Tuple = True
        __UpperCamelCase : str = True
        __UpperCamelCase : List[Any] = 9_9
        __UpperCamelCase : Union[str, Any] = 3_8_4
        __UpperCamelCase : str = 2
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : Any = 3_7
        __UpperCamelCase : str = "gelu"
        __UpperCamelCase : Optional[Any] = 0.1
        __UpperCamelCase : str = 0.1
        __UpperCamelCase : str = 5_1_2
        __UpperCamelCase : Optional[Any] = 1_6
        __UpperCamelCase : Dict = 2
        __UpperCamelCase : Optional[int] = 0.02
        __UpperCamelCase : List[Any] = 3
        __UpperCamelCase : Optional[Any] = 4
        __UpperCamelCase : int = 1_2_8
        __UpperCamelCase : Tuple = 2
        __UpperCamelCase : str = 9
        __UpperCamelCase : List[Any] = 1
        __UpperCamelCase : Any = None

    def a_ (self ) -> int:
        """Build a ConvBertConfig plus random ids/mask/label tensors for one batch."""
        __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase : str = None
        if self.use_input_mask:
            __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCamelCase : int = None
        if self.use_token_type_ids:
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __UpperCamelCase : List[Any] = None
        __UpperCamelCase : Union[str, Any] = None
        __UpperCamelCase : Optional[Any] = None
        if self.use_labels:
            __UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCamelCase : str = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        """Run the base model (dict and list inputs) and check the output shape."""
        __UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        __UpperCamelCase : Optional[Any] = [input_ids, input_mask]
        __UpperCamelCase : str = model(_UpperCAmelCase )
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        """Check the masked-LM head's logits shape."""
        __UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : List[str] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        """Check the sequence-classification head's logits shape."""
        __UpperCamelCase : Union[str, Any] = self.num_labels
        __UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
        __UpperCamelCase : List[str] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        """Tile inputs across choices and check the multiple-choice logits shape."""
        __UpperCamelCase : Optional[int] = self.num_choices
        __UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase : List[str] = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
        """Check the token-classification head's logits shape."""
        __UpperCamelCase : List[str] = self.num_labels
        __UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        """Check the QA head's start/end logits shapes."""
        __UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
        __UpperCamelCase : Dict = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __UpperCamelCase : Any = model(_UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def a_ (self ) -> str:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        __UpperCamelCase : str = self.prepare_config_and_inputs()
        # NOTE(review): an annotated assignment with a tuple target like this is
        # not valid Python; upstream this is a plain 7-way tuple unpacking.
        (
            (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) ,
        ) : Any = config_and_inputs
        __UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    # TF ConvBERT model test suite (common model tests + pipeline tests).
    # NOTE(review): the base classes `SCREAMING_SNAKE_CASE__` are undefined in
    # this file (upstream: TFModelTesterMixin, PipelineTesterMixin) and the
    # class attributes below are all bound to the same name `A`, so only the
    # last assignment survives — looks like mechanical renaming damage.

    A = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    A = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    A = False
    A = False
    A = False

    def a_ (self ) -> Optional[int]:
        """Set up the model tester and the config tester."""
        # NOTE(review): `TFConvBertModelTester` is not defined in this file
        # (the tester class above was renamed to `A`); confirm upstream.
        __UpperCamelCase : Tuple = TFConvBertModelTester(self )
        __UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )

    def a_ (self ) -> Dict:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def a_ (self ) -> Dict:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )

    def a_ (self ) -> Optional[int]:
        __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )

    @slow
    def a_ (self ) -> Any:
        """Save each model as a TF SavedModel, reload it with tf.keras, and
        verify the hidden-state / attention outputs' structure and shapes.
        Note the attention-head count is halved (ConvBERT's head_ratio of 2)."""
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : int = True
        if hasattr(_UpperCAmelCase , "use_cache" ):
            __UpperCamelCase : List[Any] = True
        __UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
            __UpperCamelCase : int = model_class(_UpperCAmelCase )
            __UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
                # SavedModel export lands under <tmpdir>/saved_model/1.
                __UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
                __UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(_UpperCAmelCase )
                if self.is_encoder_decoder:
                    __UpperCamelCase : Any = outputs["encoder_hidden_states"]
                    __UpperCamelCase : Tuple = outputs["encoder_attentions"]
                else:
                    __UpperCamelCase : Tuple = outputs["hidden_states"]
                    __UpperCamelCase : Optional[int] = outputs["attentions"]
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                __UpperCamelCase : Any = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def a_ (self ) -> Optional[Any]:
        """Smoke-test loading the public pretrained checkpoint."""
        __UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(_UpperCAmelCase )

    def a_ (self ) -> Tuple:
        """Verify attention outputs (count, position, shape) with and without
        output_attentions / output_hidden_states toggled via call and config."""
        __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )

        def check_decoder_attentions_output(_UpperCAmelCase ):
            # Decoder attention shape check (encoder-decoder models only).
            __UpperCamelCase : Dict = len(_UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            __UpperCamelCase : List[str] = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(_UpperCAmelCase ):
            # Encoder/self attention shape check.
            __UpperCamelCase : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            __UpperCamelCase : Any = True
            __UpperCamelCase : Dict = False
            __UpperCamelCase : str = model_class(_UpperCAmelCase )
            __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __UpperCamelCase : List[Any] = len(_UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            if self.is_encoder_decoder:
                __UpperCamelCase : str = model_class(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
                check_decoder_attentions_output(_UpperCAmelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __UpperCamelCase : Optional[Any] = True
            __UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
            __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            # Check attention is always last and order is fine
            __UpperCamelCase : int = True
            __UpperCamelCase : str = True
            __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
            __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def a_ (self ) -> str:
        """Integration check: run the pretrained ConvBERT base model on one tiny
        sequence and compare a 3x3 slice of the output against recorded values."""
        # NOTE(review): results are bound to `__UpperCamelCase` but the code below
        # reads `model`, `output` and `expected_shape`, which are never assigned —
        # looks like mechanical renaming damage; confirm against upstream.
        __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
        __UpperCamelCase : Tuple = [1, 6, 7_6_8]
        self.assertEqual(output.shape , _UpperCAmelCase )
        # Reference hidden-state values recorded from the original checkpoint.
        __UpperCamelCase : Any = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        # Loose tolerance to absorb numerical drift across TF versions/hardware.
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
| 298
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''

    # Image processor: resize -> center-crop -> rescale -> normalize pipeline,
    # plus semantic-segmentation post-processing.
    # NOTE(review): the base class `SCREAMING_SNAKE_CASE__` is undefined here
    # (upstream: BaseImageProcessor), the duplicated `_UpperCAmelCase` parameter
    # names are a SyntaxError, and attributes set on `__UpperCamelCase` are read
    # back under their original names (`self.do_resize`, ...) — mechanical
    # renaming damage; confirm against the upstream image processor.

    A = ["pixel_values"]

    def __init__(self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> None:
        super().__init__(**_UpperCAmelCase )
        # Defaults: shortest edge 256 for resize, 224x224 center crop,
        # 1/255 rescale, ImageNet mean/std for normalization.
        __UpperCamelCase : Dict = size if size is not None else {"shortest_edge": 2_5_6}
        __UpperCamelCase : Union[str, Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        __UpperCamelCase : List[str] = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
        __UpperCamelCase : Optional[int] = do_resize
        __UpperCamelCase : Optional[int] = size
        __UpperCamelCase : int = resample
        __UpperCamelCase : Optional[Any] = do_center_crop
        __UpperCamelCase : Any = crop_size
        __UpperCamelCase : Any = do_rescale
        __UpperCamelCase : Dict = rescale_factor
        __UpperCamelCase : int = do_normalize
        __UpperCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __UpperCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
        """Resize so the shortest edge matches size["shortest_edge"], keeping aspect ratio."""
        __UpperCamelCase : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        __UpperCamelCase : Union[str, Any] = get_resize_output_image_size(_UpperCAmelCase , size=size["shortest_edge"] , default_to_square=_UpperCAmelCase )
        return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        __UpperCamelCase : List[Any] = get_size_dict(_UpperCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(_UpperCAmelCase , size=(size["height"], size["width"]) , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ) -> np.ndarray:
        """Channel-wise normalize with the given mean and std."""
        return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ) -> Union[str, Any]:
        """Full preprocessing pipeline; per-call arguments override the defaults
        set in __init__. Returns a BatchFeature with key "pixel_values"."""
        __UpperCamelCase : str = do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase : Any = size if size is not None else self.size
        __UpperCamelCase : Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
        __UpperCamelCase : int = resample if resample is not None else self.resample
        __UpperCamelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        __UpperCamelCase : str = crop_size if crop_size is not None else self.crop_size
        __UpperCamelCase : Optional[int] = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
        __UpperCamelCase : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCamelCase : int = do_normalize if do_normalize is not None else self.do_normalize
        __UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
        __UpperCamelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
        __UpperCamelCase : List[Any] = make_list_of_images(_UpperCAmelCase )
        if not valid_images(_UpperCAmelCase ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that each enabled step has its required arguments.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        __UpperCamelCase : List[str] = [to_numpy_array(_UpperCAmelCase ) for image in images]
        if do_resize:
            __UpperCamelCase : Optional[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
        if do_center_crop:
            __UpperCamelCase : List[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
        if do_rescale:
            __UpperCamelCase : Any = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
        if do_normalize:
            __UpperCamelCase : Dict = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
        __UpperCamelCase : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
        __UpperCamelCase : List[str] = {"pixel_values": images}
        return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> int:
        """Turn model logits into per-image segmentation maps, optionally
        bilinearly resized to the given (height, width) target sizes."""
        __UpperCamelCase : List[Any] = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(_UpperCAmelCase ):
                __UpperCamelCase : List[str] = target_sizes.numpy()
            __UpperCamelCase : Dict = []
            for idx in range(len(_UpperCAmelCase ) ):
                # Upsample each image's logits to its target size before argmax.
                __UpperCamelCase : List[str] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_UpperCAmelCase )
                __UpperCamelCase : str = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(_UpperCAmelCase )
        else:
            __UpperCamelCase : Dict = logits.argmax(dim=1 )
            __UpperCamelCase : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 298
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class A :
    '''simple docstring'''

    # Records the leaf modules (no submodules, or Conv/BatchNorm) that actually
    # run during a forward pass, via temporary forward hooks.
    # NOTE(review): all three fields are bound to the same name `A` and the
    # default_factory `SCREAMING_SNAKE_CASE__` is undefined (upstream: the
    # fields are `module`, `traced`, `handles` with `field(default_factory=list)`);
    # `nn.Convad` / `nn.BatchNormad` do not exist in torch (upstream: Conv2d /
    # BatchNorm2d) — mechanical renaming damage, confirm against upstream.
    A = 42
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        # Forward hook: record modules that are leaves (or conv/batch-norm).
        __UpperCamelCase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(_UpperCAmelCase )

    def __call__(self , _UpperCAmelCase ) -> Optional[int]:
        # Hook every submodule, run one forward pass, then remove the hooks.
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(_UpperCAmelCase )
        [x.remove() for x in self.handles]
        return self

    @property
    def a_ (self ) -> Tuple:
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A :
    '''simple docstring'''

    # Copies parameters operation-by-operation from a source model into a
    # destination model by tracing both with the same input.
    # NOTE(review): the five fields are all bound to `A` (upstream: `dest`,
    # `src`, `verbose`, `dest_skip`, `src_skip`) — mechanical renaming damage.
    A = 42
    A = 42
    A = 0
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )
    A = field(default_factory=SCREAMING_SNAKE_CASE__ )

    def __call__(self , _UpperCAmelCase ) -> Any:
        """Trace src and dest with input `_UpperCAmelCase`, filter skipped module
        types, then load each source module's state dict into its counterpart."""
        __UpperCamelCase : List[str] = Tracker(self.dest )(_UpperCAmelCase ).parametrized
        __UpperCamelCase : List[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized
        __UpperCamelCase : Optional[int] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) )
        __UpperCamelCase : List[Any] = list(filter(lambda _UpperCAmelCase : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) )
        # Both traces must contain the same number of parametrized operations,
        # otherwise a 1:1 weight transfer is impossible.
        if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while"
                f" destination module has {len(_UpperCAmelCase )}." )
        for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}" )
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    """Copy weights from the timm checkpoint ``name`` into a HF ResNet built
    from ``config``, verify the logits match, and optionally push to the Hub.

    Fixes the original definition: the duplicated ``snake_case__`` parameters
    were a SyntaxError and locals were bound to an unused name while later
    lines read ``from_model``/``our_model``/``x``/``checkpoint_name``; the
    module-level callers already use the name ``convert_weight_and_push``.
    """
    print(F"Converting {name}..." )
    with torch.no_grad():
        # Source (timm) and destination (transformers) models, both in eval mode.
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    # Sanity check: the converted model must reproduce the original logits.
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"resnet{'-'.join(name.split('resnet' ) )}"
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=True , )
        print(F"Pushed {checkpoint_name}" )
def __lowerCAmelCase ( save_directory , model_name = None , push_to_hub = True ):
    """Convert one (or all) supported ResNet checkpoints and optionally push them.

    Args:
        save_directory: local directory passed through to the per-model converter.
        model_name: convert only this architecture when given; all of them when ``None``.
        push_to_hub: forwarded to the per-model converter.

    Returns:
        ``(config, expected_shape)`` — the last config used and the expected logits shape.

    NOTE(review): parameter and local names restored from the call sites below
    (the original used duplicate ``snake_case__`` parameters — a SyntaxError).
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    # Load the ImageNet id -> label mapping from the hub; keys arrive as strings.
    with open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) as f:
        idalabel = json.load(f )
    # Fix: the original comprehension converted the wrong variable (`int(snake_case__)`).
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # NOTE(review): `ResNetConfig` is assumed imported at the top of this file — confirm.
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
    }
    if model_name:
        # Fix: bind `config` here too, so the return below never hits an unbound local.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: convert one (or all) supported timm ResNet checkpoints.
    # NOTE(review): the original assigned the parser to `_lowerCAmelCase` but then
    # called `parser.add_argument` on an undefined name — fixed here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    # NOTE(review): `type=bool` is an argparse pitfall — any non-empty string
    # (including "False") parses as True. Kept as-is to preserve CLI behavior.
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A ( PipelineTesterMixin , unittest.TestCase ):
    """Fast (CPU-sized) tests for the Shap-E image-to-image pipeline using tiny dummy components.

    NOTE(review): the obfuscated original named every class attribute ``A`` and every
    method ``a_``, so later definitions clobbered earlier ones while method bodies
    referenced ``self.pipeline_class``, ``self.batch_params``, ``self.time_input_dim``,
    ``self.dummy_prior`` etc.  Names are restored to the identifiers the bodies use;
    the undefined base ``SCREAMING_SNAKE_CASE__`` is restored to the imported
    ``PipelineTesterMixin``.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    # presumably the flag disabled by `A = False` in the original — TODO confirm name
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        # Hidden size shared by the dummy CLIP encoder and the prior's projection.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        """A tiny, deterministically-seeded CLIP vision tower."""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        """Standard CLIP preprocessing at 224px."""
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        """A tiny, deterministically-seeded prior transformer."""
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        """A tiny, deterministically-seeded Shap-E renderer."""
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        """Assemble all dummy sub-models into the pipeline's component dict."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Seeded inputs (a 64x64 random image) for a single fast inference step."""
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        """One denoising step on CPU must reproduce a known pixel slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        """num_images_per_prompt must multiply the output batch dimension."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """Slow, GPU-only integration test for the pretrained Shap-E img2img pipeline.

    NOTE(review): method names and locals restored — the obfuscated original named
    both methods ``a_`` (the second clobbered the first) and referenced the
    undefined ``_UpperCAmelCase`` where the imported ``torch_device`` belongs.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
# Module-level logger; later statements in this file call `logger.addHandler(...)`,
# so the name must be `logger` (the original bound it to `_lowerCAmelCase`).
logger = logging.getLogger()
def __lowerCAmelCase ( ):
__UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("-f" )
__UpperCamelCase : Any = parser.parse_args()
return args.f
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Dict = {}
__UpperCamelCase : Dict = os.path.join(snake_case__ , "all_results.json" )
if os.path.exists(snake_case__ ):
with open(snake_case__ , "r" ) as f:
__UpperCamelCase : Any = json.load(snake_case__ )
else:
raise ValueError(F"can't find {path}" )
return results
def __lowerCAmelCase ( ):
    """True when the tests run on CUDA *and* NVIDIA apex is importable.

    Fix: the original assigned the CUDA check to ``__UpperCamelCase`` but read
    ``is_using_cuda`` on the next line — local name restored.
    """
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Mirror log records to stdout so test runners capture example-script output.
# Fix: the handler was bound to `_lowerCAmelCase` while the next line read the
# undefined name `stream_handler`.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( TestCasePlus ):
    """End-to-end smoke tests for the PyTorch example scripts, launched via ``accelerate launch``.

    NOTE(review): the obfuscated original named every method ``a_`` (clobbering) and
    assigned all locals/class attributes to ``__UpperCamelCase`` while bodies read
    ``cls.tmpdir``, ``self._launch_args``, ``tmp_dir``, ``testargs``, ``result`` —
    names restored.  The undefined base ``SCREAMING_SNAKE_CASE__`` is restored to
    the imported ``TestCasePlus``.
    """

    @classmethod
    def setUpClass(cls):
        # Write Accelerate config once for the class; picks up CPU, GPU, and multi-GPU.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --seed=42\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "glue_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --block_size 128\n            --per_device_train_batch_size 5\n            --per_device_eval_batch_size 5\n            --num_train_epochs 2\n            --output_dir {tmp_dir}\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 100 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "clm_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --num_train_epochs=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 42 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "mlm_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "ner_no_trainer" ) ) )

    @unittest.skip(reason="Fix me @muellerzr" )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --seed=42\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 28 )
        self.assertGreaterEqual(result["eval_exact"] , 28 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "qa_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/swag/sample.json\n            --validation_file tests/fixtures/tests_samples/swag/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=20\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "swag_no_trainer" ) ) )

    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_rouge1"] , 10 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "summarization_no_trainer" ) ) )

    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n            --model_name_or_path sshleifer/student_marian_en_ro_6_1\n            --source_lang en\n            --target_lang ro\n            --train_file tests/fixtures/tests_samples/wmt16/sample.json\n            --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --num_beams=6\n            --learning_rate=3e-3\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --source_lang en_XX\n            --target_lang ro_RO\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_bleu"] , 30 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "translation_no_trainer" ) ) )

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n            --dataset_name huggingface/semantic-segmentation-test-sample\n            --output_dir {tmp_dir}\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n        ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n            --model_name_or_path google/vit-base-patch16-224-in21k\n            --dataset_name hf-internal-testing/cats_vs_dogs_sample\n            --learning_rate 1e-4\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 1\n            --max_train_steps 2\n            --train_val_split 0.1\n            --seed 42\n            --output_dir {tmp_dir}\n            --with_tracking\n            --checkpointing_steps 1\n        ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "image_classification_no_trainer" ) ) )
| 298
| 1
|
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowerCAmelCase = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
_lowerCAmelCase = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
_lowerCAmelCase = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
_lowerCAmelCase = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
_lowerCAmelCase = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
_lowerCAmelCase = ''''''
_lowerCAmelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
_lowerCAmelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCAmelCase = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
    "readme_md, expected_dict" , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def __lowerCAmelCase ( readme_md , expected_dict ):
    """Well-formed READMEs parsed from a string must produce the expected dict.

    Fix: the original declared two parameters both named ``snake_case__``
    (a SyntaxError); names restored from the ``parametrize`` id string.
    NOTE(review): ``example_yaml_structure`` must be the ``yaml.safe_load``
    constant defined at the top of this file — confirm its binding.
    """
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def __lowerCAmelCase ( readme_md , expected_error ):
    """Malformed READMEs must fail ``validate()`` with the expected message.

    Fix: duplicate ``snake_case__`` parameters (SyntaxError) restored from the
    ``parametrize`` id string; the exception type passed to ``pytest.raises``
    restored to ``ValueError``, matching the messages under test.
    """
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def __lowerCAmelCase ( readme_md , expected_error ):
    """Structurally broken READMEs must already fail at parse time (``from_string``).

    Fix: duplicate ``snake_case__`` parameters restored; exception type for
    ``pytest.raises`` restored to ``ValueError``.
    """
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
    "readme_md," , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def __lowerCAmelCase ( readme_md ):
    """With ``suppress_parsing_errors=True``, a broken README must not raise.

    Fix: restored the keyword value to ``True`` (the original passed the
    obfuscated, undefined ``snake_case__`` for it).
    """
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
    "readme_md, expected_dict" , [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] , )
def __lowerCAmelCase ( readme_md , expected_dict ):
    """Well-formed READMEs read from disk must parse to the expected structure.

    Fix: duplicate ``snake_case__`` parameters restored; locals ``path``/``out``
    restored (the original assigned them to ``__UpperCamelCase`` but read the
    real names in the assertions).
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        # The root node is named after the file path and carries no text of its own.
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] , )
def __lowerCAmelCase ( readme_md , expected_error ):
    """Validation errors must mention the on-disk README path.

    Fixes(review): duplicate ``snake_case__`` parameters (SyntaxError) renamed to
    the parametrize names; ``pytest.raises`` now gets ``ValueError``.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        full_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(full_error ) ):
            # NOTE(review): ``example_yaml_structure`` assumed to be the shared fixture -- confirm.
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error" , [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] , )
def __lowerCAmelCase ( readme_md , expected_error ):
    """Parsing errors must be raised directly by from_readme (no validate() call).

    Fixes(review): duplicate ``snake_case__`` parameters (SyntaxError) renamed to
    the parametrize names; ``pytest.raises`` now gets ``ValueError``.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        full_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(full_error ) ):
            # NOTE(review): ``example_yaml_structure`` assumed to be the shared fixture -- confirm.
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
    "readme_md," , [
        (README_MULTIPLE_SAME_HEADING_1),
    ] , )
def __lowerCAmelCase ( readme_md ):
    """from_readme on the same malformed file must succeed with suppression on.

    Fixes(review): parameter renamed to match the parametrize spec; the
    ``suppress_parsing_errors`` flag must be ``True`` (the original passed the
    readme string everywhere).
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / "README.md"
        with open(path , "w+" ) as readme_file:
            readme_file.write(readme_md )
        # NOTE(review): ``example_yaml_structure`` assumed to be the shared fixture -- confirm.
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
| 298
|
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCAmelCase ( snake_case__ ):
if not isinstance(snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = F"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger('''transformers.models.speecht5''')
# fairseq-name -> HF-name mapping tables, one per sub-module of SpeechT5.
# Fix(review): every constant below was assigned to the same name
# ``_lowerCAmelCase`` (each assignment clobbering the previous one) while the
# code references the real names (e.g. ``MAPPING_S2T`` / ``IGNORE_KEYS_S2T``
# in the weight loader and the ``**MAPPING_*`` spreads right here); the names
# are restored from those uses.
MAPPING_SPEECH_ENCODER_PRENET = {
    '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
    '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
    '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
    '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
MAPPING_TEXT_ENCODER_PRENET = {
    '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
    '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
MAPPING_SPEECH_DECODER_PRENET = {
    '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
    '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
    '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
    '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
    '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
MAPPING_SPEECH_DECODER_POSTNET = {
    '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
    '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
    '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
    '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
    '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
    '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
    '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
    '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
    '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
MAPPING_TEXT_DECODER_PRENET = {
    '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
MAPPING_TEXT_DECODER_POSTNET = {
    '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
MAPPING_ENCODER = {
    '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
    '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
    '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
    '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
    '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
    '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
    '''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
    '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
    '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
MAPPING_DECODER = {
    '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
    '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
    '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
    '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
    '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
    '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
    '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
    '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
    '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
    '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
    '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
    '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
    '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
# Per-task composite mappings (speech-to-text, text-to-speech, speech-to-speech).
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# fairseq keys that have no HF counterpart and are silently skipped.
IGNORE_KEYS = [
    '''encoder.version''',
    '''encoder.layers.*.norm_k.weight''',
    '''encoder.layers.*.norm_k.bias''',
    '''decoder.version''',
    '''decoder.layers.*.norm_k.weight''',
    '''decoder.layers.*.norm_k.bias''',
    '''decoder.pos_emb.pe_k''',
    '''speech_encoder_prenet.embed_positions._float_tensor''',
    '''text_decoder_prenet.embed_positions._float_tensor''',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''speech_decoder_prenet.*''',
    '''speech_decoder_postnet.*''',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    '''encoder.proj''',
    '''speech_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    '''encoder.proj''',
    '''text_encoder_prenet.*''',
    '''text_decoder_prenet.*''',
    '''text_decoder_postnet.*''',
]
def __lowerCAmelCase ( hf_pointer , key , value , full_name , weight_type ):
    """Walk ``key`` (dot-separated) down ``hf_pointer`` and copy ``value`` into
    the target parameter/buffer, after checking shapes.

    Fixes(review): the original declared five parameters all named
    ``snake_case__`` (SyntaxError) and wrote every weight into a throwaway
    local instead of ``hf_pointer.<attr>.data`` -- names restored from their
    uses in the body (``hf_pointer.shape``, the error message, the
    per-``weight_type`` branches).
    """
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    # Shape check: compare against the named sub-tensor (or the module itself
    # when no weight_type is given).
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__UpperCamelCase , __UpperCamelCase : Tuple = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __lowerCAmelCase ( fairseq_dict , hf_model , task ):
    """Copy every tensor from the fairseq state dict into ``hf_model``.

    ``task`` selects the per-task name mapping ("s2t", "t2s" or "s2s");
    unmatched fairseq keys are collected and reported at the end.

    Fixes(review): the original declared three parameters all named
    ``snake_case__`` (SyntaxError) and bound the selected mapping / ignore
    tables and flags to throwaway locals; names restored from their uses in
    the body (``task``, ``hf_model``, ``MAPPING.items()``, ``is_used``).
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F"Unsupported task: {task}" )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F"{name} was ignored" )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Splice the layer index from the fairseq name into the HF name.
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Copy one fairseq feature-encoder conv weight into the HF feature extractor.

    Fixes(review): the original declared five parameters all named
    ``snake_case__`` (SyntaxError) and wrote each tensor into a throwaway
    local; the intended targets are restored from the shape-check error
    messages, which name ``feature_extractor.conv_layers[layer_id].conv...``
    and ``...layer_norm...`` explicitly.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def __lowerCAmelCase ( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    """Convert a fairseq SpeechT5 checkpoint into the HF format and save it.

    Fixes(review): the original declared six parameters all named
    ``snake_case__`` (SyntaxError) and bound ``config``/``model``/etc. to
    throwaway locals. Parameter names/order restored from the body uses
    (``config_path``, ``task``, ``vocab_path``, ``repo_id``) and from the
    argparse call at the bottom of the file.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        # NOTE(review): 1876 / 600 restored as position limits per the upstream
        # conversion script -- confirm against the reference checkpoint.
        config.max_speech_positions = 1_876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1_876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F"Unknown task name: {task}" )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint["model"] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
# CLI entry point for the conversion script.
# Fixes(review): the parser and parsed args were assigned to ``_lowerCAmelCase``
# while the code references ``parser`` / ``args`` -- names restored.
# NOTE(review): ``convert_speechta_checkpoint`` is not defined under that name
# in this obfuscated module (the function above is ``__lowerCAmelCase``).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--task''',
        default='''s2t''',
        type=str,
        help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
__UpperCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
__UpperCamelCase : str = 1 - (matter_density + radiation_density + dark_energy)
__UpperCamelCase : List[Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
# Script entry: run doctests, then demo a flat Lambda-CDM evaluation.
# Fix(review): the demo density was assigned to ``_lowerCAmelCase`` while the
# call below references ``matter_density`` -- name restored.
# NOTE(review): ``hubble_parameter`` is not defined under that name in this
# obfuscated module (the function above is ``__lowerCAmelCase``).
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 298
| 1
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Repo-relative paths used by the task-guide checker.
# Fix(review): both constants were bound to ``_lowerCAmelCase`` (the second
# clobbering the first) while the module calls
# ``direct_transformers_import(TRANSFORMERS_PATH)`` below; the guide-directory
# name ``PATH_TO_TASK_GUIDES`` follows the same upstream script -- confirm.
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# Fix(review): these constants were all bound to ``_lowerCAmelCase`` while the
# functions below reference ``transformers_module``, ``TASK_GUIDE_TO_MODELS``
# and ``SPECIAL_TASK_GUIDE_TO_MODEL_TYPES`` -- names restored from those uses.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
    '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    '''summarization.md''': ('''nllb''',),
    '''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( task_guide ):
    """Return the markdown list of models supported by ``task_guide`` (one line).

    Fix(review): the parameter was named ``snake_case__`` while the body
    indexes ``TASK_GUIDE_TO_MODELS[task_guide]`` -- a NameError at runtime;
    renamed to the name the body already uses.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( task_guide , overwrite=False ):
    """Check (and optionally rewrite) the auto-generated model list of one guide.

    Fixes(review): duplicate ``snake_case__`` parameters (SyntaxError) renamed
    from the body uses (``overwrite``, ``{task_guide}`` in the error message).
    The guide path is assumed to be ``PATH_TO_TASK_GUIDES / task_guide`` as in
    the upstream script -- confirm.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
# CLI entry: verify every task guide, optionally fixing in place.
# Fix(review): parser/args were assigned to ``_lowerCAmelCase`` while the
# code references ``parser`` and ``args`` -- names restored.
# NOTE(review): ``check_model_list_for_task`` is not defined under that name
# in this obfuscated module (the function above is ``__lowerCAmelCase``).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Repo-relative paths used by the task-guide checker (duplicate copy of the
# script above in this dump). Fix(review): both constants were bound to
# ``_lowerCAmelCase`` while the module calls
# ``direct_transformers_import(TRANSFORMERS_PATH)`` below; the guide-directory
# name ``PATH_TO_TASK_GUIDES`` follows the same upstream script -- confirm.
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# Fix(review): these constants were all bound to ``_lowerCAmelCase`` while the
# functions below reference ``transformers_module``, ``TASK_GUIDE_TO_MODELS``
# and ``SPECIAL_TASK_GUIDE_TO_MODEL_TYPES`` -- names restored from those uses.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
    '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    '''summarization.md''': ('''nllb''',),
    '''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( task_guide ):
    """Return the markdown list of models supported by ``task_guide`` (one line).

    Fix(review): the parameter was named ``snake_case__`` while the body
    indexes ``TASK_GUIDE_TO_MODELS[task_guide]`` -- a NameError at runtime;
    renamed to the name the body already uses.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( task_guide , overwrite=False ):
    """Check (and optionally rewrite) the auto-generated model list of one guide.

    Fixes(review): duplicate ``snake_case__`` parameters (SyntaxError) renamed
    from the body uses (``overwrite``, ``{task_guide}`` in the error message).
    The guide path is assumed to be ``PATH_TO_TASK_GUIDES / task_guide`` as in
    the upstream script -- confirm.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
# CLI entry: verify every task guide, optionally fixing in place.
# Fix(review): parser/args were assigned to ``_lowerCAmelCase`` while the
# code references ``parser`` and ``args`` -- names restored.
# NOTE(review): ``check_model_list_for_task`` is not defined under that name
# in this obfuscated module (the function above is ``__lowerCAmelCase``).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
| 1
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : int = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
__UpperCamelCase : Optional[Any] = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
__UpperCamelCase : Optional[int] = F"{src_lang}-{tgt_lang}"
__UpperCamelCase : Dict = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(snake_case__ , exist_ok=snake_case__ )
__UpperCamelCase : List[str] = os.path.join(snake_case__ , "README.md" )
print(F"Generating {path}" )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(snake_case__ )
# make sure we are under the root of the project
# Fix(review): these module-level names were all bound to ``_lowerCAmelCase``
# while the loop body references ``repo_dir``, ``model_cards_dir``,
# ``src_lang``, ``tgt_lang`` and ``model_card_dir`` -- names restored from
# those uses. ``model_type`` (the leading "wmt19" token) is unused.
# NOTE(review): ``write_model_card`` is not defined under that name in this
# obfuscated module (the function above is ``__lowerCAmelCase``).
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    model_type, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 298
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    r"""
    Wraps an OwlViT image processor and a CLIP tokenizer into a single processor.

    Text queries are tokenized (each batch sample padded to the same number of
    queries), images and query images are preprocessed, and the results are
    merged into a single `BatchEncoding`.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated alias of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """
        Prepare text and/or image inputs for the model and return a `BatchEncoding`.

        Raises:
            ValueError: if none of `text`, `query_images` and `images` is given, or if
                `return_tensors` names an unavailable backend.
            TypeError: if `text` is not a string, a list of strings or a nested list of strings.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Stack the per-sample tokenizer outputs along a new batch axis for the
            # requested tensor backend.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias for `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 298
| 1
|
'''simple docstring'''
def simplify(current_set):
    """
    One elimination pass: normalize each row by its leading coefficient, subtract the
    first row to cancel the leading term of the others, then recurse on the reduced
    sub-system until rows have length 3.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term (rows with a 0 lead are kept).
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row to cancel the leading term of every other row.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        saved_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the stripped first column and the saved first row.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, saved_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations):
    """
    Solve n simultaneous linear equations, each given as a list of n coefficients
    followed by the constant term. Returns the solutions rounded to 5 decimals.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]

    Raises:
        IndexError: if the system is empty or rows are not of length n+1.
        ValueError: if entries are not int/float, or no equation is free of zeros.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # `simplify` divides by leading terms, so move a zero-free equation to the front.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    # Back-substitute from the last (shortest) reduced row upwards.
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: a 5x5 system and a trivial single-equation system.
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 298
|
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """
    Encode bytes as an uppercase Base16 (hex) string, per RFC 3548 section 6.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Each byte becomes two uppercase hex digits (zero-padded).
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    """
    Decode an uppercase Base16 (hex) string back into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'

    Raises:
        ValueError: if the length is odd or a character is outside 0-9A-F.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Module-level logger; every function below logs through this name.
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """
    Save `model` to `dirpath` via `save_pretrained`, first removing any stale
    `config.json` / `pytorch_model.bin` left from a previous run (or creating the
    directory if it does not exist).
    """
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """
    Compute -sum(p * log(p)) over the last dimension of `p`.

    If `unlogit` is True, `p` is squared first. Entries where p == 0 contribute 0
    (avoids the NaN from 0 * log(0)).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) -> 0 by convention
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor through the module logger: a header row, then one line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        # Integer tensors (e.g. head ranks) print as %d, floats with 5 decimals.
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """
    Run the model over `eval_dataloader` and accumulate, per attention head:
    the attention entropy and a gradient-based importance score (|d loss / d mask|).

    Returns (attn_entropy, head_importance, total_loss).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """
    Iteratively zero out the least-important heads until the LM score (1/loss) falls
    below `masking_threshold` times the unmasked score. Saves the final mask to
    `<output_dir>/head_mask.npy` and returns it.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """
    Actually remove the heads zeroed in `head_mask` (not just mask them), then compare
    score / parameter count / timing before and after pruning and save the model.
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Map each layer to the list of head indices whose mask entry is 0.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        # squeeze() collapses single-head layers to a bare int; normalize to a list.
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """Parse CLI args, set up device/logging, load GPT-2, build the dataset, and run head importance / masking / pruning."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path", )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3", )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).", )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length", default=128, type=int, help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ), )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size." )
    parser.add_argument("--seed", type=int, default=42 )
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging." )
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging." )
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the Flax example scripts importable by adding their directories to sys.path.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    """Return the value passed on the command line via `-f` (pytest's own flag)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """
    Load `<output_dir>/<split>_results.json` and return it as a dict.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Echo test logging to stdout; attach to the root logger so all example scripts inherit it.
stream_handler = logging.StreamHandler(sys.stdout)
logging.getLogger().addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests: each runs a tiny training job through a Flax example
    script by patching sys.argv and checking the metrics it writes to disk."""

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_ta_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 298
| 1
|
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
# NOTE(review): both assignments below bind the same mangled name, so the string
# silently overwrites the logger; presumably these were originally
# `logger = logging.get_logger(__name__)` and `_CONFIG_FOR_DOC = "T5Config"` — confirm.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''T5Config'''
class TFMTaModel(TFTaModel):
    r"""
    TF mT5 base model: identical to [`TFTaModel`] but registered under the "mt5"
    model type with [`MTaConfig`].
    """

    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """mT5 model with LM head: identical to TF T5's, paired with an mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel(TFTaEncoderModel):
    """mT5 encoder-only model: identical to TF T5's encoder, paired with an mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig
| 298
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs/inputs for the standalone decoder tests."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return a small (config, input_ids, attention_mask, lm_labels) tuple."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that running with a past-key-values cache matches the no-cache forward pass."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict shape expected by the common test mixins."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standalone tests for the TrOCR decoder (no encoder attached)."""

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 298
| 1
|
'''simple docstring'''
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# All tracked files in the repository; fail fast if discovery returned nothing.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Non-zero exit code signals CI that some file names violate the conventions.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 298
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# The conversion relies on fairseq internals that are only stable in this range.
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Sample used to verify the converted model matches the fairseq original.
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy the weights of a fairseq X-MOD checkpoint into a HF transformers X-MOD model
    and verify both produce the same outputs before saving.

    Args:
        xmod_checkpoint_path: path to the official fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for the converted model.
        classification_head: also convert the "mnli" sequence-classification head.
    """
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            # NOTE(review): assumes HF XmodAdapter uses `dense1`/`dense2` for fairseq's fc1/fc2 — confirm
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 298
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Checkpoint name on the Hub -> config URL.
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a Dinat (Dilated Neighborhood Attention Transformer) model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 298
|
'''simple docstring'''
def __lowerCAmelCase(txt: str) -> list:
    """
    Return every variant of `txt` with exactly one alphabetic character uppercased.

    Non-alphabetic positions are skipped; the empty string yields an empty list.
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
| 298
| 1
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Default cache location shared with huggingface_hub downloads.
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 298
|
'''simple docstring'''
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """
    Count ordered combinations (with repetition) of elements of `array` summing to `target`.

    Naive exponential recursion; `n` (len(array)) is kept for signature parity with the
    memoized/bottom-up variants below.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # one way to make zero: the empty combination
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """
    Same count as `combination_sum_iv`, memoized top-down with a dp array
    (dp_array[t] == -1 means "not computed yet").
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """
    Same count as `combination_sum_iv`, computed bottom-up in O(n * target).

    Args:
        n: number of elements in `array`.
        array: candidate values (positive ints).
        target: sum to reach.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # empty combination makes zero
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 298
| 1
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Convert a `tests/models/.../test_modeling_*.py` path into a dotted module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in the test module of `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Collect the test classes of `test_file` that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Collect the distinct model classes exercised by the test classes of `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class`, run its setUp, and return the class of its model tester (or None)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes of `test_file` that exercise `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the model tester classes of the test classes in `test_file` exercising `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_test_mapping(test_file):
    """Map each model class exercised in `test_file` to the test classes exercising it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Map each model class exercised in `test_file` to its model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively replace classes in `o` by their names so the result is JSON-serializable."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 298
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Put the repo's `src` first on sys.path so the local checkout is tested.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared transformers command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended reports when `--make-reports` was passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 298
| 1
|
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """
    Search the sorted list `arr` for `x` using jump search.

    Jumps ahead in sqrt(n)-sized blocks, then scans linearly inside the block.
    Returns the index of `x`, or -1 if it is not present.
    """
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump forward until the block's last element is >= x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    # Linear scan within the identified block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    # Interactive driver: read a sorted comma-separated list and a target value.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| 298
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds image-processor settings and computes expected output sizes for the tests."""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected (height, width) after shortest-edge resizing, max-size
        capping, and rounding down to a multiple of `size_divisor`. For batched inputs
        returns the per-dimension maxima across the batch.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Test suite for ``BridgeTowerImageProcessor`` over PIL, numpy and torch inputs.

    NOTE(review): every method below is bound to the same name ``a_``, so at
    class-creation time only the *last* definition survives and the earlier
    ones (setup helper, the processor-dict property, the attribute checks,
    the PIL and numpy tests) are shadowed -- the original method names were
    presumably lost to automated renaming; confirm against the upstream file.
    NOTE(review): ``_UpperCAmelCase`` is also passed as a *value* in several
    calls (e.g. ``equal_resolution=_UpperCAmelCase``) but is never defined in
    this scope -- it presumably replaced boolean literals; verify.
    """

    # Class under test; None when the vision extras (PIL) are unavailable.
    A = BridgeTowerImageProcessor if is_vision_available() else None

    def a_ (self ) -> Dict:
        # Build the shared tester that supplies processor kwargs and expected sizes.
        __UpperCamelCase : Optional[Any] = BridgeTowerImageProcessingTester(self )

    @property
    def a_ (self ) -> Optional[int]:
        # Kwargs used to instantiate the image processor in the tests below.
        return self.image_processor_tester.prepare_image_processor_dict()

    def a_ (self ) -> Union[str, Any]:
        # The processor must expose all of its documented configuration attributes.
        __UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) )

    def a_ (self ) -> List[str]:
        # Deliberate no-op placeholder.
        pass

    def a_ (self ) -> List[Any]:
        # Initialize image processor
        __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , Image.Image )
        # Test not batched input
        __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def a_ (self ) -> Tuple:
        # Initialize image processor
        __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , np.ndarray )
        # Test not batched input
        __UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def a_ (self ) -> int:
        # Initialize image processor
        __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
        # Test not batched input
        __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 298
| 1
|
"""Polynomial-regression demo: fit a degree-4 model to the position/salary data
and plot the fit.

NOTE(review): the obfuscated dump bound every module-level value to the single
name ``_lowerCAmelCase`` while the rest of the script read the original names
(``dataset``, ``X``, ``y``, ``poly_reg``, ``pol_reg``, ``viz_polymonial``),
which made the module fail with NameError on import.  The original bindings
are restored below.
"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
    '''position_salaries.csv'''
)
X = dataset.iloc[:, 1:2].values  # position level, kept 2-D for sklearn
y = dataset.iloc[:, 2].values  # salary

# The split is kept for parity with the original script; the regression below
# is fitted on the full data set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    """Scatter the raw data (red) and overlay the fitted polynomial curve (blue)."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 298
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Hard version pins: the weight surgery below depends on gluonnlp 0.8.3 /
# mxnet 1.5.0 internals, so refuse to run with anything else.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
# NOTE(review): the logger and the sample sentence are bound to the same name,
# so the logger binding is immediately overwritten; the original names were
# presumably ``logger`` and ``SAMPLE_TEXT`` -- confirm against upstream.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    """Convert an original Bort (gluonnlp/mxnet) checkpoint into a HF ``BertForMaskedLM``
    and verify the converted model against the original on a sample sentence.

    NOTE(review): this dump was damaged by automated renaming and cannot run as-is:
    * both parameters are named ``snake_case__`` (duplicate argument => SyntaxError);
      from the body they were the checkpoint path and the output folder,
    * many intermediate results are assigned to the throwaway name
      ``__UpperCamelCase`` and later read back under their original, now-undefined
      names (``predefined_args``, ``params``, ``hf_bort_config``, ``input_ids``,
      ``output_gluon``, ...).
    Restore the identifiers from the upstream conversion script before use.
    """
    # Hyper-parameters of the released Bort 4/8/768/1024 architecture.
    __UpperCamelCase : List[Any] = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1E-5,
        "token_type_vocab_size": 2,
    }
    __UpperCamelCase : Optional[int] = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    __UpperCamelCase : Any = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case__ ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    __UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    __UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
    __UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
    __UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
        snake_case__ , len(snake_case__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
    original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
    __UpperCamelCase : int = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    __UpperCamelCase : Any = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(snake_case__ ),
    }
    __UpperCamelCase : List[str] = BertConfig.from_dict(snake_case__ )
    __UpperCamelCase : str = BertForMaskedLM(snake_case__ )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(snake_case__ ) -> nn.Parameter:
        # NOTE(review): reads ``mx_array`` but the parameter is ``snake_case__``.
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    def check_and_map_params(snake_case__ , snake_case__ ):
        # NOTE(review): duplicate parameter names (SyntaxError); the body reads
        # ``hf_param`` and ``gluon_param``, presumably the original names.
        __UpperCamelCase : Any = hf_param.shape
        __UpperCamelCase : List[Any] = to_torch(params[gluon_param] )
        __UpperCamelCase : Union[str, Any] = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    # Embedding weights and their layer norms.
    __UpperCamelCase : Tuple = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    __UpperCamelCase : str = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    __UpperCamelCase : Optional[int] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    __UpperCamelCase : str = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    __UpperCamelCase : Any = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    # Per-layer mapping following the table above.
    for i in range(hf_bort_config.num_hidden_layers ):
        __UpperCamelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        __UpperCamelCase : BertSelfAttention = layer.attention.self
        __UpperCamelCase : int = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        __UpperCamelCase : List[str] = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        __UpperCamelCase : str = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        __UpperCamelCase : List[str] = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        __UpperCamelCase : Tuple = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        __UpperCamelCase : BertSelfOutput = layer.attention.output
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        __UpperCamelCase : Optional[int] = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        __UpperCamelCase : BertIntermediate = layer.intermediate
        __UpperCamelCase : Dict = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        __UpperCamelCase : BertOutput = layer.output
        __UpperCamelCase : Dict = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        __UpperCamelCase : Union[str, Any] = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        __UpperCamelCase : List[str] = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        __UpperCamelCase : int = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    __UpperCamelCase : Any = RobertaTokenizer.from_pretrained("roberta-base" )
    __UpperCamelCase : int = tokenizer.encode_plus(snake_case__ )["input_ids"]
    # Get gluon output
    __UpperCamelCase : Dict = mx.nd.array([input_ids] )
    __UpperCamelCase : Any = original_bort(inputs=snake_case__ , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(snake_case__ )
    __UpperCamelCase : Optional[Any] = BertModel.from_pretrained(snake_case__ )
    hf_bort_model.eval()
    __UpperCamelCase : str = tokenizer.encode_plus(snake_case__ , return_tensors="pt" )
    __UpperCamelCase : Dict = hf_bort_model(**snake_case__ )[0]
    # Element-wise comparison of the first hidden layer, within 1e-3.
    __UpperCamelCase : List[Any] = output_gluon[0].asnumpy()
    __UpperCamelCase : Optional[int] = output_hf[0].detach().numpy()
    __UpperCamelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    __UpperCamelCase : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , snake_case__ )
if __name__ == "__main__":
    # NOTE(review): the obfuscated dump bound the parser and the parsed args to
    # ``_lowerCAmelCase`` while reading them back as ``parser``/``args``
    # (NameError); the working names are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # ``convert_bort_checkpoint_to_pytorch`` is not defined in this module; the
    # converter above is bound to ``__lowerCAmelCase``.
    __lowerCAmelCase(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 298
| 1
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the logger is bound to ``_lowerCAmelCase`` while the conversion
# function below calls ``logger.info`` -- the original binding was presumably
# ``logger = logging.get_logger(__name__)``; confirm and restore.
_lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# FIX(review): the obfuscated dump assigned the list to ``_lowerCAmelCase`` while every
# append below reads ``rename_keys`` (NameError at import); both names are bound here.
rename_keys = _lowerCAmelCase = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
        ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
    ]
)
def __lowerCAmelCase ( state_dict , old_key , new_key ):
    """Rename ``old_key`` to ``new_key`` in ``state_dict``, in place.

    FIX(review): the obfuscated dump declared all three parameters as
    ``snake_case__`` (duplicate argument => SyntaxError) and dropped the
    write-back; distinct names and the in-place store are restored.
    """
    value = state_dict.pop(old_key)
    state_dict[new_key] = value


# The conversion entry point below refers to this helper as ``rename_key``.
rename_key = __lowerCAmelCase
def __lowerCAmelCase ( state_dict ):
    """Return a copy of ``state_dict`` with torchvision backbone keys
    (``backbone.0.body.*``) renamed to HF's ``backbone.conv_encoder.model.*``.

    FIX(review): the obfuscated dump read the undefined names ``state_dict``/
    ``new_state_dict`` while binding results to a throwaway local; the working
    names are restored.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


# The conversion entry point below refers to this helper as ``rename_backbone_keys``.
rename_backbone_keys = __lowerCAmelCase
def __lowerCAmelCase ( state_dict ):
    """Split the fused ``in_proj`` attention weights of every DETR layer into
    separate q/k/v (and cross-attention) projection entries, in place.

    The fused matrices pack [query; key; value] along dim 0 with a hidden size
    of 256, so rows [:256], [256:512] and [-256:] are q, k and v respectively.

    FIX(review): the obfuscated dump popped the fused tensors and assigned every
    slice to a throwaway local instead of writing it back into ``state_dict``;
    the target key names below follow the upstream conversion script.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


# The conversion entry point below refers to this helper as ``read_in_q_k_v``.
read_in_q_k_v = __lowerCAmelCase
def __lowerCAmelCase ( image , checkpoint_url ):
    """Resize ``image`` so its longer side becomes 800 px (detection checkpoints)
    or 1000 px (structure-recognition checkpoints), preserving aspect ratio.

    FIX(review): the obfuscated dump declared both parameters as ``snake_case__``
    (duplicate argument => SyntaxError) and read undefined locals; the names are
    restored from the body's surviving references.
    """
    width, height = image.size  # PIL convention: size is (width, height)
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


# The conversion entry point below refers to this helper as ``resize``.
resize = __lowerCAmelCase
def __lowerCAmelCase ( snake_case__ ):
    """Convert a PIL image to a float tensor and normalize it with the standard
    ImageNet mean/std (what the DETR-style models were trained with).

    FIX(review): the obfuscated dump normalized the raw input instead of the
    converted tensor and returned the undefined name ``image``; the chaining is
    restored.
    """
    image = F.to_tensor(snake_case__)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


# The conversion entry point below refers to this helper as ``normalize``.
normalize = __lowerCAmelCase
@torch.no_grad()
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Convert a Table Transformer (DETR-style) checkpoint to HF format, verify its
    outputs on a sample image, then optionally save locally and/or push to the hub.

    NOTE(review): this dump was damaged by automated renaming and cannot run as-is:
    * all three parameters share the name ``snake_case__`` (duplicate argument =>
      SyntaxError); the body reads ``checkpoint_url``, ``pytorch_dump_folder_path``
      and ``push_to_hub``, presumably the original parameter names,
    * several results are assigned to the throwaway ``__UpperCamelCase`` and read
      back under their original, now-undefined names (``state_dict``, ``model``,
      ``image_processor``, ``outputs``, ...), and the helpers are called by names
      (``rename_key``, ``rename_backbone_keys``, ``read_in_q_k_v``, ``resize``,
      ``normalize``) that this dump never binds.
    Restore the identifiers from the upstream conversion script before use.
    """
    logger.info("Converting model..." )
    # load original state dict
    __UpperCamelCase : List[str] = torch.hub.load_state_dict_from_url(snake_case__ , map_location="cpu" )
    # rename keys
    for src, dest in rename_keys:
        rename_key(snake_case__ , snake_case__ , snake_case__ )
    __UpperCamelCase : List[str] = rename_backbone_keys(snake_case__ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case__ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    __UpperCamelCase : Optional[int] = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            __UpperCamelCase : Tuple = state_dict.pop(snake_case__ )
            __UpperCamelCase : int = val
    # create HuggingFace model and load state dict
    __UpperCamelCase : int = TableTransformerConfig(
        backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    # Detection and structure-recognition checkpoints use different label maps
    # and query counts.
    if "detection" in checkpoint_url:
        __UpperCamelCase : Any = 15
        __UpperCamelCase : Union[str, Any] = 2
        __UpperCamelCase : Union[str, Any] = {0: "table", 1: "table rotated"}
        __UpperCamelCase : Union[str, Any] = idalabel
        __UpperCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
    else:
        __UpperCamelCase : Tuple = 125
        __UpperCamelCase : Optional[Any] = 6
        __UpperCamelCase : int = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        __UpperCamelCase : Union[str, Any] = idalabel
        __UpperCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
    __UpperCamelCase : Union[str, Any] = DetrImageProcessor(
        format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
    __UpperCamelCase : int = TableTransformerForObjectDetection(snake_case__ )
    model.load_state_dict(snake_case__ )
    model.eval()
    # verify our conversion
    __UpperCamelCase : List[str] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    __UpperCamelCase : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=snake_case__ )
    __UpperCamelCase : List[Any] = Image.open(snake_case__ ).convert("RGB" )
    __UpperCamelCase : List[str] = normalize(resize(snake_case__ , snake_case__ ) ).unsqueeze(0 )
    __UpperCamelCase : Dict = model(snake_case__ )
    # Reference logits / boxes recorded from the original implementation.
    if "detection" in checkpoint_url:
        __UpperCamelCase : Union[str, Any] = (1, 15, 3)
        __UpperCamelCase : List[Any] = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        __UpperCamelCase : Tuple = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        __UpperCamelCase : Any = (1, 125, 7)
        __UpperCamelCase : Optional[int] = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        __UpperCamelCase : int = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , snake_case__ , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , snake_case__ , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
        Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
        model.save_pretrained(snake_case__ )
        image_processor.save_pretrained(snake_case__ )
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub..." )
        __UpperCamelCase : List[str] = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(snake_case__ )
        image_processor.push_to_hub(snake_case__ )
if __name__ == "__main__":
    # NOTE(review): the obfuscated dump bound the parser and the parsed args to
    # ``_lowerCAmelCase`` while reading them back as ``parser``/``args``
    # (NameError); the working names are restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
        type=str,
        choices=[
            '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
            '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
        ],
        help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    args = parser.parse_args()
    # ``convert_table_transformer_checkpoint`` is not defined in this module; the
    # converter above is bound to ``__lowerCAmelCase``.
    __lowerCAmelCase(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 298
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class A ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder producing a single string column ``content``.

    NOTE(review): all three methods below are bound to the same name ``a_``, so
    only the last definition survives on the class; the final method also
    declares ``_UpperCAmelCase`` twice (duplicate argument => SyntaxError) and
    reads the undefined name ``pipeline``.  The original identifiers were
    presumably lost to automated renaming; confirm against the upstream file.
    """

    def a_ (self ) -> Tuple:
        # DatasetInfo: one string feature named "content", no supervised keys.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=_UpperCAmelCase , )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        # Single TRAIN split fed from the dummy-examples helper below.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> int:
        # Build the Beam pipeline: just materialise the (key, example) pairs.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
class A ( datasets.BeamBasedBuilder ):
    """Beam-based builder with a nested feature: ``a`` is a sequence of ``{"b": str}``.

    NOTE(review): same damage as the builder above -- all three methods share the
    name ``a_`` (only the last survives), the final method declares
    ``_UpperCAmelCase`` twice (duplicate argument => SyntaxError) and reads the
    undefined name ``pipeline``.  Confirm the original names against upstream.
    """

    def a_ (self ) -> str:
        # DatasetInfo: nested sequence feature, no supervised keys.
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=_UpperCAmelCase , )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
        # Single TRAIN split fed from the nested-examples helper below.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
        # Build the Beam pipeline: just materialise the (key, example) pairs.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(_UpperCAmelCase )
def __lowerCAmelCase ( ):
    """Return the (key, example) pairs the dummy Beam dataset is built from."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]


# FIX(review): the builders and tests in this module call this helper as
# ``get_test_dummy_examples``, a name the obfuscated dump never bound.
get_test_dummy_examples = __lowerCAmelCase
def __lowerCAmelCase ( ):
    """Return the (key, example) pairs for the nested-feature Beam dataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]


# FIX(review): the builders and tests in this module call this helper as
# ``get_test_nested_examples``, a name the obfuscated dump never bound.
get_test_nested_examples = __lowerCAmelCase
class BeamBuilderTest(TestCase):
    """Integration tests for Beam-based dataset builders using the DirectRunner.

    Restores the local variable names (`expected_num_examples`, `tmp_cache_dir`,
    `builder`, `dset`) that the obfuscated original assigned to throwaway names
    while still *reading* the real ones, and the undefined ``SCREAMING_SNAKE_CASE__``
    base class (``TestCase`` is imported at the top of this file).
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two shards so the sharded output path is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # NOTE: the original checked shard 00000 twice; the second shard is 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, preparation must fail loudly.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 298
| 1
|
'''simple docstring'''
def is_unique_chars(input_str: str) -> bool:
    """Return True if every character in ``input_str`` occurs at most once.

    Uses an integer as a bitmap keyed by each character's Unicode code point.
    Fixes two defects in the obfuscated original: the bit index was computed
    from the whole string (``pow(2, snake_case__)``) instead of the character's
    code point, and the loop variables (`bitmap`, `ch_unicode`) were assigned
    to throwaway names while still being read.

    >>> is_unique_chars("abc")
    True
    >>> is_unique_chars("aba")
    False
    >>> is_unique_chars("")
    True
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 298
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCAmelCase ( snake_case__=None ):
if subparsers is not None:
__UpperCamelCase : Any = subparsers.add_parser("test" )
else:
__UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=snake_case__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=snake_case__ )
return parser
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
__UpperCamelCase : str = script_name
else:
__UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}"
__UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split()
__UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __lowerCAmelCase ( ):
__UpperCamelCase : int = test_command_parser()
__UpperCamelCase : Union[str, Any] = parser.parse_args()
test_command(snake_case__ )
if __name__ == "__main__":
main()
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking mT5-small's loss against a reference score.

    Restores the local names (`model`, `tokenizer`, `input_ids`, `labels`,
    `loss`, `mtf_score`, `EXPECTED_SCORE`) that the obfuscated original
    assigned to throwaway names while still reading the real ones.
    """

    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        # Negative mean token loss, compared against a precomputed reference.
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228_168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 298
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BlenderbotSmallTokenizer.

    Restores the mixin-required class attributes (the obfuscated original
    declared both as ``A``, so the second silently overwrote the first) and
    the local names in ``setUp`` that the original assigned to throwaway
    names while still reading them.
    """

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Tiny vocab / BPE merges written to the mixin's temp dir.
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 298
| 1
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sample text used for manual sanity checks of the converted model.
SAMPLE_TEXT = "Hello world! cécé herlolip"

# Hyper-parameter record for the authors' BertAbs implementation.
# The obfuscated original reassigned `_lowerCAmelCase` three times, which
# destroyed the `BertAbsConfig` name that convert_bertabs_checkpoints() calls.
BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Convert the authors' BertAbs checkpoint to the HuggingFace format.

    Loads the original AbsSummarizer, copies its weights into a
    BertAbsSummarizer, verifies both models produce (near-)identical outputs,
    and saves the new model's state dict.

    Args:
        path_to_checkpoints: Path to the authors' PyTorch checkpoint.
        dump_path: Output folder (NOTE(review): currently unused — the save
            path below is hard-coded, matching the original script).
    """
    # Instantiate the authors' model with the pre-trained weights.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
    )
    # map_location lambda keeps tensors on CPU (original had duplicate lambda
    # parameter names, a SyntaxError).
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outpus are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs (pad both sequences to max_pos = 512)
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the geneator layer immediatly but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    # CLI entry point: the obfuscated original bound both the parser and the
    # parsed namespace to `_lowerCAmelCase` while reading `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
| 298
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution -> batch norm -> activation, with explicit zero padding."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet "stem": a single aggressive convolution over the input pixels."""

    def __init__(self, config: RegNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 convolution + batch norm used to project/downsample the residual branch."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs) -> None:
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation attention: global pool, bottleneck, sigmoid gate."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X layer: three convolutions (1x1 / grouped 3x3 / 1x1) plus residual."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y layers, downsampling in the first."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ) -> None:
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    """Sequence of RegNet stages, optionally collecting intermediate hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooler; outputs are transposed back to NCHW."""

    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 298
| 1
|
'''simple docstring'''
def sum_of_series(first_term, common_diff, num_of_terms):
    """Return the sum of an arithmetic progression.

    Uses the closed form n/2 * (2a + (n - 1)d).

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # Review fix: the dump bound the result to `__UpperCamelCase` but returned
    # the unbound name `total`; it also renamed the function, while the caller
    # below invokes `sum_of_series`.
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
# Demo entry point: prints the sum of the series 1 + 2 + ... + 10 (55.0).
# NOTE(review): `sum_of_series` is not bound under that name in this dump — the
# definition above was renamed by obfuscation; confirm the intended target.
def __lowerCAmelCase ( ):
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 298
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Return the entropy of a batch of logits, row-wise.

    For each row computes log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)),
    i.e. the Shannon entropy of softmax(x).

    Review fix: the dump summed `x` where its own comment says "sum of
    exp(x_i)" and took `log(x)` instead of `log(A)`; bindings are restored per
    the inline comments, and the function is renamed `entropy` to match its
    call sites in this file.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with one early-exit "highway" classifier after every layer (DeeBERT).

    Review fixes: the dump named all three methods ``a_`` (only the last
    survived) and bound every assignment to ``__UpperCamelCase`` while reading
    the real names. Canonical names are restored — ``forward`` is required for
    ``nn.Module`` dispatch and ``init_highway_pooler`` is called by
    ``DeeBertModel`` below.
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # Per-layer entropy threshold; -1 disables early exit at that layer.
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set the early-exit entropy threshold(s); a scalar applies to every layer."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model pooler's weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough at layer i: abort the forward pass by
                    # raising; the caller catches HighwayException.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


A = DeeBertEncoder  # preserve the dump's class binding
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,  # NOTE(review): dump had the placeholder SCREAMING_SNAKE_CASE__ here
)
class DeeBertModel(BertPreTrainedModel):
    """BERT model whose encoder supports DeeBERT early exits.

    Review fixes: the dump named every method ``a_``, used duplicate
    ``_UpperCAmelCase`` parameters in ``forward`` (SyntaxError) and bound all
    assignments to ``__UpperCamelCase`` while reading the real names; canonical
    names are restored.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def init_highway_pooler(self):
        """Seed every highway pooler from the main pooler's weights."""
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer index -> head list."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


A = DeeBertModel  # preserve the dump's class binding
class HighwayException(Exception):
    """Raised by DeeBertEncoder to abort the forward pass at an early exit.

    Review fix: the dump bound the attributes to ``__UpperCamelCase`` and named
    the class ``A`` with a placeholder base; the encoder and the classifier in
    this file raise/catch ``HighwayException`` and read ``message`` /
    ``exit_layer``.
    """

    def __init__(self, message, exit_layer):
        # `message` carries the partial model outputs at the exit point.
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


A = HighwayException  # preserve the dump's class binding
class BertHighway(nn.Module):
    """A per-layer early-exit head: pool the hidden states, then classify.

    Review fix: attribute and local bindings restored from the obfuscated dump
    (all were assigned to ``__UpperCamelCase``); the encoder above instantiates
    this class as ``BertHighway``.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output


A = BertHighway  # preserve the dump's class binding
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,  # NOTE(review): dump had the placeholder SCREAMING_SNAKE_CASE__ here
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    """Sequence classifier on top of DeeBertModel, with highway-exit training.

    Review fixes: restored attribute/local bindings (the dump assigned
    everything to ``__UpperCamelCase`` and used duplicate ``_UpperCAmelCase``
    parameter names, a SyntaxError) and the canonical ``forward`` name.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early exit fired: e.message carries the partial outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)


A = DeeBertForSequenceClassification  # preserve the dump's class binding
| 298
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure for the MCTCT package.
# Review fixes: the dump assigned everything to one name `_lowerCAmelCase`, so
# `_import_structure` was never defined, the torch-only modeling list overwrote
# the whole dict instead of adding a key, and the `sys.modules[__name__]`
# replacement was lost.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the config/feature-extraction/processing modules.
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 298
|
'''simple docstring'''
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


# Shared filenames and cache locations for diffusers.
# Review fixes: the dump rebound a single name `_lowerCAmelCase` thirteen times
# (leaving only the last value reachable) and read `default_cache_path` before
# any definition existed. Names below are restored from the values themselves;
# confirm against the upstream constants module.
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 298
| 1
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 298
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds small ConvBERT configs/inputs and checks each TF head's shapes.

    Review fixes: the dump named every method ``a_`` (collisions), used
    duplicate ``_UpperCAmelCase`` parameter names in ``__init__`` (SyntaxError)
    and assigned all attributes/locals to ``__UpperCamelCase``; canonical names
    are restored to match the test class below, which calls
    ``prepare_config_and_inputs`` / ``create_and_check_*``.
    Note: as in the original, ``__init__`` ignores its keyword arguments and
    hard-codes the values (e.g. hidden_size is forced to 384).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, ids, token_type_ids, mask, seq/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Tile every input to (batch, num_choices, seq_length).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


A = TFConvBertModelTester  # preserve the dump's class binding
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Tuple = TFConvBertModelTester(self )
__UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=3_7 )
def a_ (self ) -> Dict:
self.config_tester.run_common_tests()
def a_ (self ) -> Dict:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a_ (self ) -> Tuple:
__UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a_ (self ) -> Tuple:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a_ (self ) -> Dict:
__UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a_ (self ) -> Any:
__UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase : str = True
__UpperCamelCase : int = True
if hasattr(_UpperCAmelCase , "use_cache" ):
__UpperCamelCase : List[Any] = True
__UpperCamelCase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__UpperCamelCase : Optional[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
for model_class in self.all_model_classes:
__UpperCamelCase : Any = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase : int = model_class(_UpperCAmelCase )
__UpperCamelCase : Any = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__UpperCamelCase : List[str] = os.path.join(_UpperCAmelCase , "saved_model" , "1" )
__UpperCamelCase : List[str] = tf.keras.models.load_model(_UpperCAmelCase )
__UpperCamelCase : Dict = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__UpperCamelCase : Any = outputs["encoder_hidden_states"]
__UpperCamelCase : Tuple = outputs["encoder_attentions"]
else:
__UpperCamelCase : Tuple = outputs["hidden_states"]
__UpperCamelCase : Optional[int] = outputs["attentions"]
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__UpperCamelCase : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_UpperCAmelCase )
    def a_ (self ) -> Tuple:
        # Checks the output_attentions plumbing: via the inputs flag, via the
        # config flag, and that attentions always come last in the outputs.
        # NOTE(review): ConvBERT halves its attention-head count internally,
        # hence the `num_attention_heads / 2` in the expected shapes below —
        # confirm against the model implementation.
        __UpperCamelCase , __UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : str = True
        __UpperCamelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        __UpperCamelCase : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        # NOTE(review): `_UpperCAmelCase` defaults below look like mangled
        # references to the seq-length variables above — verify.
        __UpperCamelCase : Any = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        __UpperCamelCase : List[Any] = getattr(self.model_tester , "key_length" , _UpperCAmelCase )
        def check_decoder_attentions_output(_UpperCAmelCase ):
            # Decoder attentions: one tensor per layer, halved head count.
            __UpperCamelCase : Dict = len(_UpperCAmelCase )
            self.assertEqual(out_len % 2 , 0 )
            __UpperCamelCase : List[str] = outputs.decoder_attentions
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(_UpperCAmelCase ):
            # Encoder (or plain) attentions: one tensor per layer, halved head count.
            __UpperCamelCase : Any = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            # 1) output_attentions requested through the inputs dict.
            __UpperCamelCase : Any = True
            __UpperCamelCase : Dict = False
            __UpperCamelCase : str = model_class(_UpperCAmelCase )
            __UpperCamelCase : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __UpperCamelCase : List[Any] = len(_UpperCAmelCase )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            if self.is_encoder_decoder:
                __UpperCamelCase : str = model_class(_UpperCAmelCase )
                __UpperCamelCase : Dict = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
                check_decoder_attentions_output(_UpperCAmelCase )
            # 2) Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __UpperCamelCase : Optional[Any] = True
            __UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
            __UpperCamelCase : int = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
            # 3) Check attention is always last and order is fine
            __UpperCamelCase : int = True
            __UpperCamelCase : str = True
            __UpperCamelCase : Optional[Any] = model_class(_UpperCAmelCase )
            __UpperCamelCase : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
            check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
    """Integration test: pretrained ConvBERT outputs must match reference values.

    Runs the YituTech/conv-bert-base checkpoint on a fixed 6-token input and
    compares a 3x3 corner of the last hidden state against hard-coded values
    (presumably recorded from a reference run — verify before changing).
    """
    @slow
    def a_ (self ) -> str:
        __UpperCamelCase : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        # Fixed toy input: batch of one, six token ids.
        __UpperCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase )[0]
        # Expected last_hidden_state shape: (batch=1, seq_len=6, hidden=768).
        __UpperCamelCase : Tuple = [1, 6, 7_6_8]
        self.assertEqual(output.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 )
| 298
| 1
|
'''simple docstring'''
import sys
def __lowerCAmelCase ( snake_case__ ):
    """Matrix-chain multiplication DP (CLRS 15.2).

    `snake_case__` is the dimension array p, where matrix A_i is
    p[i-1] x p[i].  Returns (matrix, sol): matrix[a][b] is the minimal
    number of scalar multiplications needed for A_a..A_b, and sol[a][b]
    is the index of the optimal split point.
    """
    array = snake_case__
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize  # sentinel: no split tried yet
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


# Restore the public name used by main() below; the original def name was
# mangled, leaving that call site unresolved.  Backward-compatible alias.
matrix_chain_order = __lowerCAmelCase
def __lowerCAmelCase ( optimal_solution, i, j ):
    """Print the optimal parenthesization of A_i..A_j using the split table.

    The original definition had duplicate (mangled) parameter names — a
    SyntaxError — and never bound `i`/`j`/`optimal_solution`; the intended
    signature is reconstructed from the body's uses.
    """
    if i == j:
        print("A" + str(i) , end=" " )
    else:
        print("(" , end=" " )
        # Recurse on the two halves either side of the recorded split point.
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )


# The recursive calls above and main() below use this name (sic, original
# spelling); the mangled def name left it unresolved.  Restore it.
print_optiomal_solution = __lowerCAmelCase
def __lowerCAmelCase ( ):
    """Demo: optimal matrix-chain order for the classic CLRS example."""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )


# The __main__ guard calls `main`, which the mangled def name left
# unresolved; restore it with a backward-compatible alias.
main = __lowerCAmelCase


if __name__ == "__main__":
    main()
| 298
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class A :
    """Records the leaf modules executed during one forward pass of `module`.

    A forward hook is registered on every submodule; hooks that fire on a
    leaf (no submodules, or an explicit Conv2d/BatchNorm2d) append the
    module to `traced` in execution order.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # A leaf either has no submodules (modules() yields only itself)
        # or is an explicitly whitelisted conv / batch-norm layer.
        has_not_submodules = (
            len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        )
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        """Run one forward pass tracing leaves; returns self for chaining."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Remove the hooks so the module is left untouched.
        for handle in self.handles:
            handle.remove()
        return self

    @property
    def parametrized(self):
        # Keep only traced modules that actually hold learnable parameters
        # (non-empty state_dict).
        return [m for m in self.traced if len(list(m.state_dict().keys())) > 0]


# Restore the public name used by ModuleTransfer below; the original class
# name was mangled, leaving `Tracker(...)` calls unresolved.
Tracker = A
@dataclass
class A :
    """Copies weights from `src` to `dest` by matching traced leaf modules.

    Both modules are run on the same input `x`; their parametrized leaves
    (minus any types listed in the skip lists) must line up one-to-one, and
    each destination leaf receives the corresponding source state_dict.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # Drop module types the caller asked to ignore on either side.
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(src_traced) != len(dest_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}." )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}" )


# Restore the public name used by convert_weight_and_push below.
ModuleTransfer = A
def __lowerCAmelCase ( name, config, save_directory, push_to_hub = True ):
    """Port timm checkpoint `name` into a HF ResNet and optionally push it.

    The original definition had duplicate (mangled) parameter names — a
    SyntaxError — so the signature is reconstructed from the call sites
    (name, config, save_directory, push_to_hub).
    """
    print(F"Converting {name}..." )
    with torch.no_grad():
        # Reference model from timm; presumably pretrained=True — TODO confirm.
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"resnet{'-'.join(name.split('resnet' ) )}"
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=True , )
        print(F"Pushed {checkpoint_name}" )


# Restore the public name used by convert_weights_and_push below.
convert_weight_and_push = __lowerCAmelCase
def __lowerCAmelCase ( save_directory, model_name = None, push_to_hub = True ):
    """Convert one (or all) supported resnet* timm checkpoints.

    Downloads the ImageNet-1k label map, builds per-architecture configs and
    delegates to convert_weight_and_push.  The original definition had
    duplicate (mangled) parameter names; the signature is reconstructed from
    the __main__ call site (save_directory, model_name, push_to_hub).
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # Config factory pre-bound with the ImageNet label metadata.
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape


# Restore the public name used by the __main__ guard below.
convert_weights_and_push = __lowerCAmelCase
if __name__ == "__main__":
    # CLI entry point: parse arguments, create the dump directory, convert.
    # The original code reassigned one mangled name for parser/args/path,
    # leaving `pytorch_dump_folder_path` unresolved; use distinct names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298
| 1
|
'''simple docstring'''
import pprint
import requests
_lowerCAmelCase = '''https://zenquotes.io/api'''
def __lowerCAmelCase ( ):
    """Fetch today's quote from the ZenQuotes API, decoded from JSON."""
    response = requests.get(API_ENDPOINT_URL + "/today" )
    return response.json()
def __lowerCAmelCase ( ):
    """Fetch a random quote from the ZenQuotes API, decoded from JSON."""
    return requests.get(API_ENDPOINT_URL + "/random" ).json()


# The __main__ guard calls `random_quotes`, which the mangled def name left
# unresolved; restore it with a backward-compatible alias.
random_quotes = __lowerCAmelCase
if __name__ == "__main__":
    # Fetch one random quote and pretty-print it.  The original assigned the
    # response to a mangled name, leaving `response` unresolved below.
    response = random_quotes()
    pprint.pprint(response)
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
    """Return the value of the `-f` command-line argument.

    (pytest passes `-f <file>`; this helper recovers it.)  The original body
    assigned the parser to a mangled name, leaving `parser` unresolved.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f


# Backward-compatible public alias for the mangled def name.
get_setup_file = __lowerCAmelCase
def __lowerCAmelCase ( snake_case__ ):
    """Load `all_results.json` from output directory `snake_case__`.

    Returns the parsed dict; raises ValueError when the file is missing.
    The original body collapsed `results`/`path` into one mangled name and
    its error message referenced an unbound `path`.
    """
    output_dir = snake_case__
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"can't find {path}" )
    return results


# The test methods below call `get_results`; the mangled def name left that
# unresolved.  Backward-compatible alias.
get_results = __lowerCAmelCase
def __lowerCAmelCase ( ):
    """True iff running on a CUDA device AND NVIDIA apex is importable."""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


# The test methods below call `is_cuda_and_apex_available`; the mangled def
# name left that unresolved.  Backward-compatible alias.
is_cuda_and_apex_available = __lowerCAmelCase
# Mirror logger output to stdout so test runs show example-script logs.
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
# The next line expects `stream_handler`; the mangled assignment above left
# it unresolved.  Backward-compatible alias.
stream_handler = _lowerCAmelCase
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end tests for the PyTorch `*_no_trainer` example scripts.

    Each test launches one example via `accelerate launch` with a tiny
    fixture dataset, then asserts on the metrics written to
    all_results.json and on checkpoint/tracking directories.

    NOTE(review): every test method is named `a_` (mangled), so later
    definitions shadow earlier ones and only the last would be collected —
    verify against the upstream file, where each has a distinct test_* name.
    """
    @classmethod
    def a_ (cls ) -> Union[str, Any]:
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        __UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
        __UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        __UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def a_ (cls ) -> Union[str, Any]:
        # Class-level teardown: drop the temporary accelerate config dir.
        shutil.rmtree(cls.tmpdir )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Optional[int]:
        # run_glue_no_trainer: text classification on the MRPC fixture.
        __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[Any] = f"\n    {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n    --model_name_or_path distilbert-base-uncased\n    --output_dir {tmp_dir}\n    --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n    --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --learning_rate=1e-4\n    --seed=42\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        __UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Dict:
        # run_clm_no_trainer: causal LM on the sample-text fixture.
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[Any] = f"\n    {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n    --model_name_or_path distilgpt2\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --block_size 128\n    --per_device_train_batch_size 5\n    --per_device_eval_batch_size 5\n    --num_train_epochs 2\n    --output_dir {tmp_dir}\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        __UpperCamelCase : int = get_results(_UpperCAmelCase )
        self.assertLess(result["perplexity"] , 1_0_0 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Any:
        # run_mlm_no_trainer: masked LM on the sample-text fixture.
        __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Optional[Any] = f"\n    {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n    --model_name_or_path distilroberta-base\n    --train_file ./tests/fixtures/sample_text.txt\n    --validation_file ./tests/fixtures/sample_text.txt\n    --output_dir {tmp_dir}\n    --num_train_epochs=1\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
        self.assertLess(result["perplexity"] , 4_2 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> int:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        __UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
        __UpperCamelCase : int = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : str = f"\n    {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/conll/sample.json\n    --validation_file tests/fixtures/tests_samples/conll/sample.json\n    --output_dir {tmp_dir}\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=2\n    --num_train_epochs={epochs}\n    --seed 7\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
    @unittest.skip(reason="Fix me @muellerzr" )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Any:
        # run_qa_no_trainer: SQuAD v2 question answering (currently skipped).
        __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : str = f"\n    {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n    --model_name_or_path bert-base-uncased\n    --version_2_with_negative\n    --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n    --output_dir {tmp_dir}\n    --seed=42\n    --max_train_steps=10\n    --num_warmup_steps=2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 2_8 )
        self.assertGreaterEqual(result["eval_exact"] , 2_8 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Dict:
        # run_swag_no_trainer: multiple choice on the swag fixture.
        __UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[str] = f"\n    {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n    --model_name_or_path bert-base-uncased\n    --train_file tests/fixtures/tests_samples/swag/sample.json\n    --validation_file tests/fixtures/tests_samples/swag/sample.json\n    --output_dir {tmp_dir}\n    --max_train_steps=20\n    --num_warmup_steps=2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --with_tracking\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Union[str, Any]:
        # run_summarization_no_trainer: t5-small on the xsum fixture (@slow).
        __UpperCamelCase : str = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Dict = f"\n    {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n    --model_name_or_path t5-small\n    --train_file tests/fixtures/tests_samples/xsum/sample.json\n    --validation_file tests/fixtures/tests_samples/xsum/sample.json\n    --output_dir {tmp_dir}\n    --max_train_steps=50\n    --num_warmup_steps=8\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : Dict = get_results(_UpperCAmelCase )
        self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Tuple:
        # run_translation_no_trainer: en->ro on the wmt16 fixture (@slow).
        __UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[Any] = f"\n    {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n    --model_name_or_path sshleifer/student_marian_en_ro_6_1\n    --source_lang en\n    --target_lang ro\n    --train_file tests/fixtures/tests_samples/wmt16/sample.json\n    --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n    --output_dir {tmp_dir}\n    --max_train_steps=50\n    --num_warmup_steps=8\n    --num_beams=6\n    --learning_rate=3e-3\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --source_lang en_XX\n    --target_lang ro_RO\n    --checkpointing_steps epoch\n    --with_tracking\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
        self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
    @slow
    def a_ (self ) -> List[Any]:
        # run_semantic_segmentation_no_trainer on a tiny sample dataset (@slow).
        __UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
        logger.addHandler(_UpperCAmelCase )
        __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[Any] = f"\n    {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n    --dataset_name huggingface/semantic-segmentation-test-sample\n    --output_dir {tmp_dir}\n    --max_train_steps=10\n    --num_warmup_steps=2\n    --learning_rate=2e-4\n    --per_device_train_batch_size=2\n    --per_device_eval_batch_size=1\n    --checkpointing_steps epoch\n    ".split()
        run_command(self._launch_args + testargs )
        __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
        self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def a_ (self ) -> Tuple:
        # run_image_classification_no_trainer: ViT on a cats-vs-dogs sample.
        __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Optional[Any] = f"\n    {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n    --model_name_or_path google/vit-base-patch16-224-in21k\n    --dataset_name hf-internal-testing/cats_vs_dogs_sample\n    --learning_rate 1e-4\n    --per_device_train_batch_size 2\n    --per_device_eval_batch_size 1\n    --max_train_steps 2\n    --train_val_split 0.1\n    --seed 42\n    --output_dir {tmp_dir}\n    --with_tracking\n    --checkpointing_steps 1\n    ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        __UpperCamelCase : str = get_results(_UpperCAmelCase )
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
| 298
| 1
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class A ( tf.keras.layers.Layer ):
    """In-graph GPT-2 tokenizer (keras layer wrapping BytePairTokenizer).

    The original class defined three classmethods and two methods all named
    `a_` (so only the last survived) and used duplicate parameter names
    (a SyntaxError); the names below are reconstructed from the method
    bodies, following the upstream TFGPT2Tokenizer shape.
    """

    def __init__(self , vocab , merges , max_length = None , pad_token_id = None ) -> Dict:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )

    @classmethod
    def from_tokenizer(cls , tokenizer , *args , **kwargs ) -> Union[str, Any]:
        """Build from an existing GPTaTokenizer, reusing its merges/vocab."""
        # bpe_ranks keys are (first, second) pairs; serialize as "first second".
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , *init_inputs , **kwargs ) -> Any:
        """Load a GPTaTokenizer checkpoint and wrap it as an in-graph layer."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config(cls , config ) -> List[Any]:
        return cls(**config )

    def get_config(self ) -> str:
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self , x , max_length = None ) -> List[str]:
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                attention_mask, input_ids = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 298
|
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCAmelCase ( snake_case__ ):
    """Return number + 2 if (number, number + 2) are twin primes, else -1.

    Raises TypeError for non-int input.  The original type check was the
    broken `isinstance(x, x)`; the intended check is against `int`.
    """
    number = snake_case__
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1


# Backward-compatible public alias for the mangled def name.
twin_prime = __lowerCAmelCase
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class A :
    """Shared fixture builder for the BEiT model tests.

    Holds hyperparameters for a tiny BEiT, and produces configs plus random
    pixel inputs / labels.  NOTE(review): several constructor arguments are
    shadowed by hard-coded values below (vocab_size -> 100, num_labels from
    the `_UpperCAmelCase` arg) — verify against the upstream tester.
    """
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_0_0 , _UpperCAmelCase=1_3 , _UpperCAmelCase=3_0 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1_0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=[0, 1, 2, 3] , ) -> Tuple:
        __UpperCamelCase : int = parent
        __UpperCamelCase : Dict = 1_0_0
        __UpperCamelCase : List[str] = batch_size
        __UpperCamelCase : Optional[Any] = image_size
        __UpperCamelCase : int = patch_size
        __UpperCamelCase : Any = num_channels
        __UpperCamelCase : Dict = is_training
        __UpperCamelCase : Optional[int] = use_labels
        __UpperCamelCase : Tuple = hidden_size
        __UpperCamelCase : Dict = num_hidden_layers
        __UpperCamelCase : Union[str, Any] = num_attention_heads
        __UpperCamelCase : List[Any] = intermediate_size
        __UpperCamelCase : int = hidden_act
        __UpperCamelCase : List[str] = hidden_dropout_prob
        __UpperCamelCase : Optional[int] = attention_probs_dropout_prob
        __UpperCamelCase : Dict = type_sequence_label_size
        __UpperCamelCase : Union[str, Any] = initializer_range
        __UpperCamelCase : List[Any] = scope
        __UpperCamelCase : List[str] = out_indices
        __UpperCamelCase : str = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        __UpperCamelCase : Optional[int] = (image_size // patch_size) ** 2
        __UpperCamelCase : str = num_patches + 1
    def a_ (self ) -> List[Any]:
        # Random pixel batch plus (optional) classification and segmentation labels.
        __UpperCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCamelCase : Optional[Any] = None
        __UpperCamelCase : List[str] = None
        if self.use_labels:
            __UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __UpperCamelCase : List[Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def a_ (self ) -> int:
        # Build a tiny BeitConfig from the stored hyperparameters.
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
        # Base model: last_hidden_state must be (batch, seq_len, hidden).
        __UpperCamelCase : Any = BeitModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : Any = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
        # Masked image modeling: logits over patches (seq_len - 1, no [CLS]).
        __UpperCamelCase : Optional[Any] = BeitForMaskedImageModeling(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : List[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
        # Image classification: logits (batch, num_classes); also greyscale path.
        __UpperCamelCase : Any = self.type_sequence_label_size
        __UpperCamelCase : Dict = BeitForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : Dict = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __UpperCamelCase : Union[str, Any] = 1
        __UpperCamelCase : Any = BeitForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __UpperCamelCase : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        # Semantic segmentation: logits upsampled to 2x the input resolution.
        __UpperCamelCase : Union[str, Any] = self.num_labels
        __UpperCamelCase : Optional[int] = BeitForSemanticSegmentation(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : int = model(_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        __UpperCamelCase : int = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def a_ (self ) -> Union[str, Any]:
        # Common-test adapter: config plus the inputs dict the models expect.
        __UpperCamelCase : Any = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : int = config_and_inputs
        __UpperCamelCase : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Model-level test suite for BEiT (common model tests plus pipeline mixin).

    NOTE(review): class and attribute names look mechanically renamed (`A`,
    `SCREAMING_SNAKE_CASE__`); the repeated `A = ...` class attributes would
    shadow each other as written — confirm the intended attribute names
    (all_model_classes, pipeline mapping, test flags, ...).
    """
    # Candidate model classes exercised by the shared model tests.
    A = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline tests.
    A = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    A = False
    A = False
    A = False

    def a_ (self ) -> List[str]:
        # Set up the model tester and a config tester (BEiT has no text modality).
        __UpperCamelCase : List[Any] = BeitModelTester(self )
        __UpperCamelCase : List[Any] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=3_7 )

    def a_ (self ) -> Union[str, Any]:
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds" )
    def a_ (self ) -> int:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def a_ (self ) -> str:
        pass

    def a_ (self ) -> Optional[Any]:
        # Input embeddings must be an nn.Module; output embeddings absent or Linear.
        __UpperCamelCase , __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase : Union[str, Any] = model_class(_UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __UpperCamelCase : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )

    def a_ (self ) -> Union[str, Any]:
        # The forward signature's first argument must be `pixel_values`.
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase : str = model_class(_UpperCAmelCase )
            __UpperCamelCase : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase : Optional[int] = [*signature.parameters.keys()]
            __UpperCamelCase : Optional[int] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )

    def a_ (self ) -> int:
        __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def a_ (self ) -> List[str]:
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )

    def a_ (self ) -> Union[str, Any]:
        __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )

    def a_ (self ) -> List[Any]:
        __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )

    def a_ (self ) -> Dict:
        # Training smoke test: forward + backward for every trainable model class.
        if not self.model_tester.is_training:
            return
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : Any = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]:
                continue
            __UpperCamelCase : Optional[int] = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.train()
            __UpperCamelCase : str = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
            __UpperCamelCase : Any = model(**_UpperCAmelCase ).loss
            loss.backward()

    def a_ (self ) -> str:
        # Same training smoke test, with gradient checkpointing enabled.
        __UpperCamelCase , __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        __UpperCamelCase : str = False
        __UpperCamelCase : Optional[int] = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            __UpperCamelCase : List[Any] = model_class(_UpperCAmelCase )
            model.gradient_checkpointing_enable()
            model.to(_UpperCAmelCase )
            model.train()
            __UpperCamelCase : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
            __UpperCamelCase : Dict = model(**_UpperCAmelCase ).loss
            loss.backward()

    def a_ (self ) -> str:
        # With a zero-init config, every trainable parameter's mean must round
        # to 0.0 or 1.0; lambda (layer-scale) parameters are exempt.
        __UpperCamelCase , __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : Tuple = _config_zero_init(_UpperCAmelCase )
        for model_class in self.all_model_classes:
            __UpperCamelCase : Tuple = model_class(config=_UpperCAmelCase )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )

    @slow
    def a_ (self ) -> Optional[Any]:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase : Tuple = BeitModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
def __lowerCAmelCase ( ):
    """Load and return the standard COCO fixture image used by the tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
@require_vision
class A ( unittest.TestCase ):
    """Slow integration tests running real BEiT checkpoints on fixture images."""

    @cached_property
    def a_ (self ) -> List[str]:
        # Default image processor for the base checkpoint (None without vision deps).
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None

    @slow
    def a_ (self ) -> Dict:
        # Masked-image-modeling head: verify logits shape and a reference 3x3 slice.
        __UpperCamelCase : str = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(_UpperCAmelCase )
        __UpperCamelCase : str = self.default_image_processor
        __UpperCamelCase : Tuple = prepare_img()
        __UpperCamelCase : Optional[Any] = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).pixel_values.to(_UpperCAmelCase )
        # prepare bool_masked_pos
        __UpperCamelCase : Any = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __UpperCamelCase : List[Any] = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase )
        __UpperCamelCase : List[str] = outputs.logits
        # verify the logits
        __UpperCamelCase : Optional[int] = torch.Size((1, 1_9_6, 8_1_9_2) )
        self.assertEqual(logits.shape , _UpperCAmelCase )
        __UpperCamelCase : Tuple = torch.tensor(
            [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1E-2 ) )

    @slow
    def a_ (self ) -> Any:
        # ImageNet-1k classification head: logits shape, reference values, argmax.
        __UpperCamelCase : Union[str, Any] = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(_UpperCAmelCase )
        __UpperCamelCase : List[str] = self.default_image_processor
        __UpperCamelCase : Optional[int] = prepare_img()
        __UpperCamelCase : int = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __UpperCamelCase : Union[str, Any] = model(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = outputs.logits
        # verify the logits
        __UpperCamelCase : Optional[Any] = torch.Size((1, 1_0_0_0) )
        self.assertEqual(logits.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
        __UpperCamelCase : Dict = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )

    @slow
    def a_ (self ) -> Tuple:
        # ImageNet-22k classification head (large checkpoint).
        __UpperCamelCase : str = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
            _UpperCAmelCase )
        __UpperCamelCase : int = self.default_image_processor
        __UpperCamelCase : Optional[int] = prepare_img()
        __UpperCamelCase : int = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __UpperCamelCase : int = model(**_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = outputs.logits
        # verify the logits
        __UpperCamelCase : List[Any] = torch.Size((1, 2_1_8_4_1) )
        self.assertEqual(logits.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
        __UpperCamelCase : Optional[int] = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )

    @slow
    def a_ (self ) -> List[Any]:
        # Semantic segmentation on ADE20k fixtures: logits shape and a reference
        # slice. Reference values differ slightly with the installed Pillow version.
        __UpperCamelCase : int = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        __UpperCamelCase : Optional[Any] = model.to(_UpperCAmelCase )
        __UpperCamelCase : List[str] = BeitImageProcessor(do_resize=_UpperCAmelCase , size=6_4_0 , do_center_crop=_UpperCAmelCase )
        __UpperCamelCase : Dict = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        __UpperCamelCase : List[str] = Image.open(ds[0]["file"] )
        __UpperCamelCase : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __UpperCamelCase : str = model(**_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = outputs.logits
        # verify the logits
        __UpperCamelCase : Optional[Any] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
        self.assertEqual(logits.shape , _UpperCAmelCase )
        __UpperCamelCase : Optional[int] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
        if is_pillow_less_than_a:
            __UpperCamelCase : Any = torch.tensor(
                [
                    [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
                    [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
                    [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
                ] , device=_UpperCAmelCase , )
        else:
            __UpperCamelCase : List[Any] = torch.tensor(
                [
                    [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
                    [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
                    [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
                ] , device=_UpperCAmelCase , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )

    @slow
    def a_ (self ) -> List[str]:
        # post_process_semantic_segmentation: check shapes with and without
        # explicit target_sizes.
        __UpperCamelCase : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        __UpperCamelCase : Any = model.to(_UpperCAmelCase )
        __UpperCamelCase : List[str] = BeitImageProcessor(do_resize=_UpperCAmelCase , size=6_4_0 , do_center_crop=_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        __UpperCamelCase : List[str] = Image.open(ds[0]["file"] )
        __UpperCamelCase : int = image_processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __UpperCamelCase : str = model(**_UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = outputs.logits.detach().cpu()
        __UpperCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(5_0_0, 3_0_0)] )
        __UpperCamelCase : Any = torch.Size((5_0_0, 3_0_0) )
        self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
        __UpperCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
        __UpperCamelCase : Dict = torch.Size((1_6_0, 1_6_0) )
        self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
__UpperCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
__UpperCamelCase : str = 1 - (matter_density + radiation_density + dark_energy)
__UpperCamelCase : List[Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    _lowerCAmelCase = 0.3
    # NOTE(review): `hubble_parameter` and `matter_density` are read below but the
    # definitions above are named `__lowerCAmelCase` / `_lowerCAmelCase` — this
    # looks like mechanical renaming; confirm the intended names.
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 298
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=SCREAMING_SNAKE_CASE__ ):
    """Dummy placeholder that raises a helpful error whenever it is
    instantiated or used while the required backends are missing."""
    # Backends that must be installed before the real object can be used.
    A = ["torch", "scipy"]

    def __init__(self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
        # Raises ImportError-style guidance if torch/scipy are unavailable.
        requires_backends(self , ["torch", "scipy"] )

    @classmethod
    def a_ (cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
        requires_backends(cls , ["torch", "scipy"] )

    @classmethod
    def a_ (cls , *_UpperCAmelCase , **_UpperCAmelCase ) -> str:
        requires_backends(cls , ["torch", "scipy"] )
| 298
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCAmelCase = '''src/transformers'''
_lowerCAmelCase = '''docs/source/en/tasks'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): the assignments below target `_lowerCAmelCase` while later code
# reads `transformers_module`, `TASK_GUIDE_TO_MODELS` and
# `SPECIAL_TASK_GUIDE_TO_MODEL_TYPES` — looks like mechanical renaming.
_lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Mapping: task-guide doc file -> auto-mapping of model names supporting that task.
_lowerCAmelCase = {
    '''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    '''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    '''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    '''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    '''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    '''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    '''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    '''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    '''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    '''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    '''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    '''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    '''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    '''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    '''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCAmelCase = {
    '''summarization.md''': ('''nllb''',),
    '''translation.md''': ('''nllb''',),
}
def __lowerCAmelCase ( task_guide ):
    """Return a markdown, comma-separated list of model links for a task guide.

    Fix: the original assigned every intermediate to a throwaway name while the
    body read ``model_maping_names``/``special_model_types``/``model_names``,
    leaving those names undefined; the real bindings are restored here.
    """
    # Auto-mapping of model names for this guide, plus special-cased model types.
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    # Keep display names whose model-type code supports this task.
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase ( task_guide , overwrite=False ):
    """Check that the auto-generated model list in ``task_guide`` is current.

    Rewrites the file in place when ``overwrite`` is True; otherwise raises
    ValueError if the list is stale.

    Fix: the original signature repeated ``snake_case__`` (a SyntaxError) and
    discarded the ``_find_text_in_file`` results into throwaway names while the
    body read ``current_list``/``start_index``/``end_index``/``lines``.
    """
    # NOTE(review): `PATH_TO_TASK_GUIDES` matches the usage convention elsewhere
    # in this file, but the constant assignment above is mechanically renamed —
    # confirm the module-level names.
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    # CLI entry point: validate every task guide, optionally fixing in place.
    _lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    _lowerCAmelCase = parser.parse_args()
    # NOTE(review): `parser` / `args` are read but the assignments above target
    # `_lowerCAmelCase` — looks like mechanical renaming; confirm intended names.
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class A :
    """Immutable scheduler state carried between FlaxDDPMScheduler calls.

    NOTE(review): the field annotations appear mechanically replaced by
    `A = 42` — upstream the fields are common / init_noise_sigma / timesteps /
    num_inference_steps; confirm before relying on this file.
    """
    A = 42
    # setable values
    A = 42
    A = 42
    A = None

    @classmethod
    def a_ (cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
        # Convenience constructor binding the three required fields by keyword.
        return cls(common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase )
@dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
    """Output dataclass returned by FlaxDDPMScheduler.step (prev sample + state)."""
    # NOTE(review): field annotation mechanically replaced by `A = 42`.
    A = 42
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """Flax implementation of the DDPM scheduler (Ho et al., 2020).

    NOTE(review): assignments throughout target `__UpperCamelCase` while later
    lines read the original variable names — looks like mechanical renaming;
    confirm intended names before relying on this file.
    """
    # Names of the Karras diffusion schedulers this scheduler is compatible with.
    A = [e.name for e in FlaxKarrasDiffusionSchedulers]
    A = 42

    @property
    def a_ (self ) -> Dict:
        # The scheduler keeps its mutable state in a separate dataclass.
        return True

    @register_to_config
    def __init__(self , _UpperCAmelCase = 1_0_0_0 , _UpperCAmelCase = 0.0_001 , _UpperCAmelCase = 0.02 , _UpperCAmelCase = "linear" , _UpperCAmelCase = None , _UpperCAmelCase = "fixed_small" , _UpperCAmelCase = True , _UpperCAmelCase = "epsilon" , _UpperCAmelCase = jnp.floataa , ) -> Tuple:
        # Only the dtype is stored directly; other args live in self.config.
        __UpperCamelCase : Optional[int] = dtype

    def a_ (self , _UpperCAmelCase = None ) -> DDPMSchedulerState:
        # Build the initial scheduler state: unit noise sigma and timesteps in
        # descending order over the full training schedule.
        if common is None:
            __UpperCamelCase : Any = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        __UpperCamelCase : Tuple = jnp.array(1.0 , dtype=self.dtype )
        __UpperCamelCase : str = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=_UpperCAmelCase , init_noise_sigma=_UpperCAmelCase , timesteps=_UpperCAmelCase , )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None ) -> jnp.ndarray:
        # DDPM does not scale the model input.
        return sample

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = () ) -> DDPMSchedulerState:
        # Select evenly spaced inference timesteps (descending).
        __UpperCamelCase : Optional[int] = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        __UpperCamelCase : int = (jnp.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase , )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ) -> Any:
        # Per-timestep posterior variance for the configured `variance_type`.
        __UpperCamelCase : Dict = state.common.alphas_cumprod[t]
        __UpperCamelCase : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        __UpperCamelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            __UpperCamelCase : Union[str, Any] = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            __UpperCamelCase : str = jnp.clip(_UpperCAmelCase , a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            __UpperCamelCase : List[str] = jnp.log(jnp.clip(_UpperCAmelCase , a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            __UpperCamelCase : Optional[int] = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            __UpperCamelCase : Dict = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min and max log-variance using the model output.
            __UpperCamelCase : Tuple = variance
            __UpperCamelCase : str = state.common.betas[t]
            __UpperCamelCase : Dict = (predicted_variance + 1) / 2
            __UpperCamelCase : Any = frac * max_log + (1 - frac) * min_log
        return variance

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # One reverse-diffusion step: predict x_0, form the posterior mean, and
        # add sampled noise for t > 0.
        __UpperCamelCase : Dict = timestep
        if key is None:
            __UpperCamelCase : Tuple = jax.random.PRNGKey(0 )
        # Split off the predicted variance channels when the model learns them.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            __UpperCamelCase , __UpperCamelCase : Dict = jnp.split(_UpperCAmelCase , sample.shape[1] , axis=1 )
        else:
            __UpperCamelCase : List[Any] = None
        # 1. compute alphas, betas
        __UpperCamelCase : Tuple = state.common.alphas_cumprod[t]
        __UpperCamelCase : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        __UpperCamelCase : Optional[Any] = 1 - alpha_prod_t
        __UpperCamelCase : List[Any] = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            __UpperCamelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            __UpperCamelCase : List[str] = model_output
        elif self.config.prediction_type == "v_prediction":
            __UpperCamelCase : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler." )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            __UpperCamelCase : Tuple = jnp.clip(_UpperCAmelCase , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __UpperCamelCase : int = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        __UpperCamelCase : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        __UpperCamelCase : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Sample scaled Gaussian noise with the timestep's std deviation.
            __UpperCamelCase : int = jax.random.split(_UpperCAmelCase , num=1 )
            __UpperCamelCase : int = jax.random.normal(_UpperCAmelCase , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(_UpperCAmelCase , _UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
        __UpperCamelCase : Tuple = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        __UpperCamelCase : Tuple = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=_UpperCAmelCase , state=_UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> jnp.ndarray:
        # Forward-diffuse original samples to the given timesteps.
        return add_noise_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> jnp.ndarray:
        # Velocity target used by v-prediction training.
        return get_velocity_common(state.common , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    def __len__(self ) -> List[Any]:
        # Length of the full training schedule.
        return self.config.num_train_timesteps
| 298
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
    """Processor that wraps an OwlViT image processor and a CLIP tokenizer into
    a single callable preparing text queries, query images and images.

    NOTE(review): assignments throughout target `__UpperCamelCase` while later
    lines read the original variable names — looks like mechanical renaming.
    """
    # Attributes used by ProcessorMixin to locate the sub-components.
    A = ["image_processor", "tokenizer"]
    A = "OwlViTImageProcessor"
    A = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, emitting a deprecation warning.
        __UpperCamelCase : Tuple = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , _UpperCAmelCase , )
            __UpperCamelCase : str = kwargs.pop("feature_extractor" )
        __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(_UpperCAmelCase , _UpperCAmelCase )

    def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
        # Tokenize text queries (padding each batch sample to the same number of
        # queries) and/or run the image processor on images / query images.
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
                __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
            elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
                __UpperCamelCase : List[str] = []
                # Maximum number of queries across batch
                __UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_UpperCAmelCase ) != max_num_queries:
                        __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
                    __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
                    encodings.append(_UpperCAmelCase )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            # Concatenate per-sample encodings into one batch in the requested
            # tensor framework.
            if return_tensors == "np":
                __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            __UpperCamelCase : Optional[Any] = BatchEncoding()
            __UpperCamelCase : Union[str, Any] = input_ids
            __UpperCamelCase : List[str] = attention_mask
        if query_images is not None:
            # Query images are processed separately into `query_pixel_values`.
            __UpperCamelCase : str = BatchEncoding()
            __UpperCamelCase : Any = self.image_processor(
                _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
            __UpperCamelCase : List[Any] = query_pixel_values
        if images is not None:
            __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
        # Merge image features into the returned encoding depending on which
        # combination of inputs was provided.
        if text is not None and images is not None:
            __UpperCamelCase : Optional[Any] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            __UpperCamelCase : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # Forward to the image processor's post_process.
        return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
        # Forward to the image processor's object-detection post-processing.
        return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # Forward to the image processor's image-guided-detection post-processing.
        return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a_ (self ) -> Tuple:
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
        return self.image_processor_class

    @property
    def a_ (self ) -> Union[str, Any]:
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
        return self.image_processor
| 298
| 1
|
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# Module logger.
_lowerCAmelCase = get_logger(__name__)
# Shared docstring describing the (input_ids, scores) -> scores contract;
# injected into processor __call__ methods via `add_start_docstrings`.
_lowerCAmelCase = R'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class A :
    """Abstract base class for all logits processors applied during generation."""

    # fix: the decorator argument was the undefined `_UpperCAmelCase`; the input
    # docstring constant defined at module level is `_lowerCAmelCase`.
    @add_start_docstrings(_lowerCAmelCase )
    def __call__(self , input_ids , scores ) -> jnp.ndarray:
        # fix: both parameters were named `_UpperCAmelCase` (a SyntaxError);
        # restored distinct names. Subclasses must override this method.
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class A :
    """Abstract base class for all logits warpers used with multinomial sampling."""

    # fix: the decorator argument was the undefined `_UpperCAmelCase`; the input
    # docstring constant defined at module level is `_lowerCAmelCase`.
    @add_start_docstrings(_lowerCAmelCase )
    def __call__(self , input_ids , scores ) -> jnp.ndarray:
        # fix: both parameters were named `_UpperCAmelCase` (a SyntaxError);
        # restored distinct names. Subclasses must override this method.
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class A ( list ):
    """Ordered collection of logits processors, applied sequentially to the scores.

    NOTE(review): the base class was the undefined placeholder
    ``SCREAMING_SNAKE_CASE__``; ``list`` restores the iterability that
    ``for processor in self`` below depends on.
    """

    @add_start_docstrings(_lowerCAmelCase )
    def __call__(self , input_ids , scores , cur_len , **kwargs ) -> jnp.ndarray:
        # fix: the mangled version assigned the signature to a dead local and then
        # read the undefined names `function_args`, `kwargs` and `scores`.
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                # Processors with extra parameters must receive them via kwargs.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys() )} for "
                        f"{processor.__class__} are passed to the logits processor." )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Logits warper that divides the scores by `temperature` (sharpens or flattens the distribution)."""

    def __init__(self , temperature ) -> Tuple:
        # fix: the check was `isinstance(_UpperCAmelCase, _UpperCAmelCase)` and the
        # value was stored in a dead local instead of on `self`.
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" )
        self.temperature = temperature

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Top-p (nucleus) warper: keeps the smallest token set whose cumulative probability exceeds `top_p`."""

    def __init__(self , top_p , filter_value = -float("Inf" ) , min_tokens_to_keep = 1 ) -> str:
        # fix: validation read undefined names and results were stored in dead
        # locals; restored `self.*` attributes used by __call__.
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        # Sort the full vocabulary so the cumulative probability mass is ordered.
        topk_scores , topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        # Undo the sort so scores line up with the original vocabulary order.
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Top-k warper: keeps only the `top_k` highest-probability tokens, masking the rest with `filter_value`."""

    def __init__(self , top_k , filter_value = -float("Inf" ) , min_tokens_to_keep = 1 ) -> Dict:
        # fix: restored the `self.*` attributes that the mangling turned into
        # dead local assignments.
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        batch_size , vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores , topk_indices = lax.top_k(scores , topk )
        # Flatten batch and vocab so a single scatter restores the kept scores.
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Forces `bos_token_id` to be the first generated token."""

    def __init__(self , bos_token_id ) -> Optional[int]:
        self.bos_token_id = bos_token_id

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape , -float("inf" ) )
        # Penalty applies only at the very first generation step (cur_len == 1).
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Forces `eos_token_id` to be the last token when `max_length` is reached."""

    def __init__(self , max_length , eos_token_id ) -> int:
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape , -float("inf" ) )
        # Penalty applies only at the step that produces the final token.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Suppresses EOS until at least `min_length` tokens have been generated."""

    def __init__(self , min_length , eos_token_id ) -> List[str]:
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , scores )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Suppresses `begin_suppress_tokens` while generation is still at `begin_index`."""

    def __init__(self , begin_suppress_tokens , begin_index ) -> List[Any]:
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index

    def __call__(self , input_ids , scores , cur_len ) -> Tuple:
        # Penalty is active only at exactly the begin index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , scores )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Unconditionally masks every token in `suppress_tokens` with -inf."""

    def __init__(self , suppress_tokens ) -> List[str]:
        self.suppress_tokens = list(suppress_tokens )

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Forces specific tokens at specific generation indices, per `force_token_map`."""

    def __init__(self , force_token_map ) -> Optional[Any]:
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        # fix: `jnp.intaa` (mangled) restored to `jnp.int32`.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )

    def __call__(self , input_ids , scores , cur_len ) -> jnp.ndarray:
        def _force_token(generation_idx ):
            # Replace all scores with -inf except the single forced token, which
            # gets score 0 (guaranteeing it is sampled).
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float("inf" )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] ,
            # past the end of the map: nothing to force
            lambda: scores ,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 ,
                # only valid (non-negative) entries force a token
                lambda: _force_token(cur_len ) ,
                lambda: scores , ) , )
        return scores
class A ( SCREAMING_SNAKE_CASE__ ):
    """Whisper timestamp rules: timestamps come in pairs, and timestamp probability
    mass can force a timestamp token over text tokens."""

    def __init__(self , generate_config , model_config , decoder_input_length ) -> List[str]:
        # fix: every attribute below was mangled into a dead local assignment.
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , "max_initial_timestamp_index" ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self , input_ids , scores , cur_len ) -> Any:
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )

        def handle_pairs(input_ids_k , scores_k ):
            # Timestamps must appear in pairs; after a lone timestamp either a
            # second timestamp or EOS is required.
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , scores_k , )

        scores = jax.vmap(handle_pairs )(input_ids , scores )

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , scores , )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )

        def handle_cumulative_probs(logprobs_k , scores_k ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , scores_k , )

        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
    """Encode a bytes-like object as an uppercase base16 (RFC 3548) string.

    Fix: the original called ``hex(snake_case__)`` on the *whole* input inside the
    comprehension (leaving ``byte`` unused), which raises TypeError for bytes
    input; each individual byte must be hex-encoded.
    """
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(snake_case__ )] )
def __lowerCAmelCase ( snake_case__ ):
    """Decode an uppercase base16 (RFC 3548) string back into bytes.

    Raises ValueError for odd-length input or characters outside 0-9A-F.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if len(snake_case__ ) % 2 != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(snake_case__ ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # Input is now guaranteed to be canonical uppercase hex of even length, so the
    # stdlib parser yields exactly the same bytes as a manual two-digit loop.
    return bytes.fromhex(snake_case__ )
if __name__ == "__main__":
    # Run the module's doctests when the file is executed as a script.
    import doctest
    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
    """Return the value of the `-f` command line flag (path passed by the runner).

    Fix: the parser was assigned to a dead local (`__UpperCamelCase`) while the
    code read the undefined names `parser` and `args`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def __lowerCAmelCase ( snake_case__ ):
    """Load and return the `all_results.json` dictionary from directory `snake_case__`.

    Raises ValueError if the file does not exist.
    Fix: `results` and `path` were assigned to dead locals while the code read
    the undefined names.
    """
    results = {}
    path = os.path.join(snake_case__ , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"can't find {path}" )
    return results
def __lowerCAmelCase ( ):
    """Return True when running on a CUDA device and NVIDIA apex is installed.

    Used by the tests below to decide whether `--fp16` can be exercised.
    Fix: the flag was assigned to a dead local while the return read the
    undefined name `is_using_cuda`.
    """
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end smoke tests for the `accelerate`-based *_no_trainer example scripts.

    Fixes applied to the mangled original:
    - every method was named `a_`, so later definitions silently shadowed
      earlier ones; descriptive `test_*` / setUpClass names restored so unittest
      discovers them;
    - class attributes (`tmpdir`, `configPath`, `_launch_args`) and locals
      (`tmp_dir`, `testargs`, `result`, `epochs`) were assigned to dead
      placeholders while the code read the real names.
    NOTE(review): `get_results` / `is_cuda_and_apex_available` are module helpers
    that the mangling renamed to `__lowerCAmelCase`; calls keep the intended
    names pending a module-level rename.
    """

    @classmethod
    def setUpClass(cls ):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , "default_config.yml" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls ):
        shutil.rmtree(cls.tmpdir )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_glue_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --seed=42\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "glue_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --block_size 128\n            --per_device_train_batch_size 5\n            --per_device_eval_batch_size 5\n            --num_train_epochs 2\n            --output_dir {tmp_dir}\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 1_0_0 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "clm_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --num_train_epochs=1\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertLess(result["perplexity"] , 4_2 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "mlm_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
        self.assertLess(result["train_loss"] , 0.5 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "ner_no_trainer" ) ) )

    @unittest.skip(reason="Fix me @muellerzr" )
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --seed=42\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"] , 2_8 )
        self.assertGreaterEqual(result["eval_exact"] , 2_8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "qa_no_trainer" ) ) )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/swag/sample.json\n            --validation_file tests/fixtures/tests_samples/swag/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=20\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --with_tracking\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "swag_no_trainer" ) ) )

    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
        self.assertGreaterEqual(result["eval_rouge2"] , 2 )
        self.assertGreaterEqual(result["eval_rougeL"] , 7 )
        self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "summarization_no_trainer" ) ) )

    @slow
    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n            --model_name_or_path sshleifer/student_marian_en_ro_6_1\n            --source_lang en\n            --target_lang ro\n            --train_file tests/fixtures/tests_samples/wmt16/sample.json\n            --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --num_beams=6\n            --learning_rate=3e-3\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --source_lang en_XX\n            --target_lang ro_RO\n            --checkpointing_steps epoch\n            --with_tracking\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "epoch_0" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "translation_no_trainer" ) ) )

    @slow
    def test_run_semantic_segmentation_no_trainer(self ):
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n            --dataset_name huggingface/semantic-segmentation-test-sample\n            --output_dir {tmp_dir}\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            ".split()
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )

    @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n            --model_name_or_path google/vit-base-patch16-224-in21k\n            --dataset_name hf-internal-testing/cats_vs_dogs_sample\n            --learning_rate 1e-4\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 1\n            --max_train_steps 2\n            --train_val_split 0.1\n            --seed 42\n            --output_dir {tmp_dir}\n            --with_tracking\n            --checkpointing_steps 1\n            ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16" )
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "step_1" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , "image_classification_no_trainer" ) ) )
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowerCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def __lowerCAmelCase ( ):
    """Return the value of the `-f` command line flag (path passed by the runner).

    Fix: the parser was assigned to a dead local (`__UpperCamelCase`) while the
    code read the undefined names `parser` and `args`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def __lowerCAmelCase ( snake_case__ , split="eval" ):
    """Load `{split}_results.json` from directory `snake_case__`; raise ValueError if missing.

    Fix: both parameters were named `snake_case__` (a SyntaxError); the second is
    the `split` name used in the f-string below. `path` was also assigned to a
    dead local.
    """
    path = os.path.join(snake_case__ , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
_lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end smoke tests for the Flax example scripts.

    Fixes applied to the mangled original:
    - every method was named `a_`, so later definitions shadowed earlier ones;
      descriptive `test_*` names restored so unittest discovers them;
    - `patch.object(_UpperCAmelCase, "argv", _UpperCAmelCase)` restored to
      `patch.object(sys, "argv", testargs)`;
    - locals (`tmp_dir`, `testargs`, `result`, `epochs`) were assigned to dead
      placeholders while the code read the real names.
    NOTE(review): `get_results` is the module helper that the mangling renamed
    to `__lowerCAmelCase`; calls keep the intended name pending a module-level
    rename.
    """

    def test_run_glue(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_glue.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --eval_steps=2\n            --warmup_steps=2\n            --seed=42\n            --max_seq_length=128\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )

    @slow
    def test_run_clm(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_clm_flax.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --do_train\n            --do_eval\n            --block_size 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --num_train_epochs 2\n            --logging_steps 2 --eval_steps 2\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result["eval_perplexity"] , 1_0_0 )

    @slow
    def test_run_summarization(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_summarization.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --test_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --num_train_epochs=3\n            --warmup_steps=8\n            --do_train\n            --do_eval\n            --do_predict\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --predict_with_generate\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split="test" )
            self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
            self.assertGreaterEqual(result["test_rouge2"] , 2 )
            self.assertGreaterEqual(result["test_rougeL"] , 7 )
            self.assertGreaterEqual(result["test_rougeLsum"] , 7 )

    @slow
    def test_run_mlm(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_mlm.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --max_seq_length 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --logging_steps 2 --eval_steps 2\n            --do_train\n            --do_eval\n            --num_train_epochs=1\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result["eval_perplexity"] , 4_2 )

    @slow
    def test_run_t5_mlm(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_t5_mlm_flax.py\n            --model_name_or_path t5-small\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --do_train\n            --do_eval\n            --max_seq_length 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --num_train_epochs 2\n            --logging_steps 2 --eval_steps 2\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )

    @slow
    def test_run_ner(self ):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_flax_ner.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --do_train\n            --do_eval\n            --warmup_steps=2\n            --learning_rate=2e-4\n            --logging_steps 2 --eval_steps 2\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
            self.assertGreaterEqual(result["eval_f1"] , 0.3 )

    @slow
    def test_run_qa(self ):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_qa.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --num_train_epochs=3\n            --warmup_steps=2\n            --do_train\n            --do_eval\n            --logging_steps 2 --eval_steps 2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            ".split()
        with patch.object(sys , "argv" , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["eval_f1"] , 3_0 )
            self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 1
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
    """Builds a tiny ConvNext-backboned UperNet configuration plus random pixel
    inputs/labels for the semantic-segmentation unit tests below.

    NOTE(review): this file's identifiers are machine-mangled (locals are all
    ``__UpperCamelCase``, arguments all ``_UpperCAmelCase``), so several names
    read below (``parent``, ``batch_size``, ``config_and_inputs``, ...) are
    never actually bound, and methods with multiple ``_UpperCAmelCase``
    parameters are invalid Python (duplicate argument names). Compare with the
    upstream ``UperNetModelTester`` before relying on runtime behavior.
    """

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=3_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , _UpperCAmelCase=[2, 2, 3, 2] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1_0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=3 , _UpperCAmelCase=None , ) -> str:
        # Store the test-harness handle and all hyper-parameters of the tiny model.
        __UpperCamelCase : Optional[int] = parent
        __UpperCamelCase : Optional[int] = batch_size
        __UpperCamelCase : List[Any] = image_size
        __UpperCamelCase : Optional[Any] = num_channels
        __UpperCamelCase : Any = num_stages
        __UpperCamelCase : Dict = hidden_sizes
        __UpperCamelCase : List[Any] = depths
        __UpperCamelCase : Dict = is_training
        __UpperCamelCase : Optional[Any] = use_labels
        __UpperCamelCase : Tuple = intermediate_size
        __UpperCamelCase : Optional[int] = hidden_act
        __UpperCamelCase : Dict = type_sequence_label_size
        __UpperCamelCase : Dict = initializer_range
        __UpperCamelCase : Dict = out_features
        __UpperCamelCase : int = num_labels
        __UpperCamelCase : Tuple = scope
        __UpperCamelCase : Union[str, Any] = num_stages

    def a_ (self ) -> List[str]:
        """Create random pixel values (and labels when enabled) plus a config."""
        __UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __UpperCamelCase : Union[str, Any] = None
        if self.use_labels:
            __UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __UpperCamelCase : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def a_ (self ) -> List[Any]:
        """Backbone config: a minimal ConvNext."""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def a_ (self ) -> List[str]:
        """Full UperNet config wrapping the ConvNext backbone config."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=_UpperCAmelCase , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
        """Instantiate the model, run a forward pass and check the logits shape."""
        __UpperCamelCase : Optional[Any] = UperNetForSemanticSegmentation(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __UpperCamelCase : str = model(_UpperCAmelCase )
        # Segmentation logits are upsampled back to the input resolution.
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def a_ (self ) -> Dict:
        """Return (config, inputs_dict) for the common model tests."""
        __UpperCamelCase : Optional[int] = self.prepare_config_and_inputs()
        # NOTE(review): an annotated tuple target is a SyntaxError as written; the
        # original unpacked (config, pixel_values, labels) here.
        (
            (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) , (
                __UpperCamelCase
            ) ,
        ) : str = config_and_inputs
        __UpperCamelCase : int = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Common + pipeline model tests for ``UperNetForSemanticSegmentation``.

    NOTE(review): the base-class placeholders ``SCREAMING_SNAKE_CASE__`` and the
    repeated class-attribute name ``A`` are obfuscation artifacts (each
    ``A = ...`` rebinds the same name); the originals were ``ModelTesterMixin``
    / ``PipelineTesterMixin`` and distinct flags (``fx_compatible``,
    ``test_pruning``, ...). Verify against the upstream UperNet tests.
    """

    A = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    A = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    A = False
    A = False
    A = False
    A = False
    A = False
    A = False

    def a_ (self ) -> str:
        # Build the tiny-model tester and a config tester (UperNet has no text modality).
        __UpperCamelCase : Optional[Any] = UperNetModelTester(self )
        __UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=3_7 )

    def a_ (self ) -> str:
        # Run the standard config serialization round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def a_ (self ) -> List[Any]:
        return

    def a_ (self ) -> Union[str, Any]:
        # Every model's forward() must take `pixel_values` as its first argument.
        __UpperCamelCase , __UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase : Union[str, Any] = model_class(_UpperCAmelCase )
            __UpperCamelCase : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
            __UpperCamelCase : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )

    def a_ (self ) -> List[Any]:
        __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )

    @unittest.skip(reason="UperNet does not use inputs_embeds" )
    def a_ (self ) -> List[Any]:
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings" )
    def a_ (self ) -> Optional[Any]:
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def a_ (self ) -> List[str]:
        pass

    @unittest.skip(reason="UperNet does not have a base model" )
    def a_ (self ) -> Dict:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def a_ (self ) -> Tuple:
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def a_ (self ) -> int:
        pass

    def a_ (self ) -> List[Any]:
        # hidden_states must have num_stages + 1 entries, each at 1/4 input resolution.
        def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
            __UpperCamelCase : Tuple = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                __UpperCamelCase : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __UpperCamelCase : Dict = self.model_tester.num_stages
            self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        __UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __UpperCamelCase : str = True
            check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCamelCase : str = True
            check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    def a_ (self ) -> int:
        # With zero-init configs, every trainable parameter must round to 0 or 1.
        __UpperCamelCase , __UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCamelCase : Dict = _config_zero_init(_UpperCAmelCase )
        __UpperCamelCase : Tuple = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            __UpperCamelCase : int = model_class(config=_UpperCAmelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )

    @unittest.skip(reason="UperNet does not have tied weights" )
    def a_ (self ) -> List[str]:
        pass

    @slow
    def a_ (self ) -> Optional[Any]:
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase : str = UperNetForSemanticSegmentation.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )
def __lowerCAmelCase ( ):
    """Fetch the ADE20k validation sample image from the hub and return it as an
    RGB PIL image (used by the slow integration tests below)."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    return Image.open(filepath ).convert("RGB" )
@require_torch
@require_vision
@slow
class A ( unittest.TestCase ):
    """Integration tests: run two pretrained UperNet checkpoints (Swin-tiny and
    ConvNext-tiny backbones) on a real ADE20k image and compare a 3x3 slice of
    the logits against hard-coded reference values."""

    def a_ (self ) -> Any:
        # Swin-tiny backbone checkpoint.
        __UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
        __UpperCamelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(_UpperCAmelCase )
        __UpperCamelCase : Dict = prepare_img()
        __UpperCamelCase : Union[str, Any] = processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
        with torch.no_grad():
            __UpperCamelCase : int = model(**_UpperCAmelCase )
        # Logits come back at the full 512x512 processed-input resolution.
        __UpperCamelCase : List[Any] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )

    def a_ (self ) -> Optional[int]:
        # ConvNext-tiny backbone checkpoint.
        __UpperCamelCase : Optional[int] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
        __UpperCamelCase : List[str] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(_UpperCAmelCase )
        __UpperCamelCase : Optional[Any] = prepare_img()
        __UpperCamelCase : Optional[int] = processor(images=_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
        with torch.no_grad():
            __UpperCamelCase : int = model(**_UpperCAmelCase )
        __UpperCamelCase : int = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        __UpperCamelCase : Any = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
| 298
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
    """Builds a tiny TrOCR decoder config and random inputs for the standalone
    decoder tests below.

    NOTE(review): identifiers are machine-mangled; the constructor arguments are
    all ``_UpperCAmelCase`` (duplicate parameter names are invalid Python) and
    the names read in the bodies (``parent``, ``batch_size``, ...) are never
    bound. Compare with the upstream ``TrOCRStandaloneDecoderModelTester``.
    """

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
        # Store the test-harness handle and all tiny-model hyper-parameters.
        __UpperCamelCase : List[str] = parent
        __UpperCamelCase : str = batch_size
        __UpperCamelCase : str = decoder_seq_length
        # For common tests
        __UpperCamelCase : Optional[int] = self.decoder_seq_length
        __UpperCamelCase : Any = is_training
        __UpperCamelCase : Tuple = use_attention_mask
        __UpperCamelCase : Optional[int] = use_labels
        __UpperCamelCase : Dict = vocab_size
        __UpperCamelCase : Optional[int] = d_model
        __UpperCamelCase : Union[str, Any] = d_model
        __UpperCamelCase : int = decoder_layers
        __UpperCamelCase : Dict = decoder_layers
        __UpperCamelCase : str = decoder_ffn_dim
        __UpperCamelCase : Optional[Any] = decoder_attention_heads
        __UpperCamelCase : Optional[Any] = decoder_attention_heads
        __UpperCamelCase : List[Any] = eos_token_id
        __UpperCamelCase : int = bos_token_id
        __UpperCamelCase : Tuple = pad_token_id
        __UpperCamelCase : Tuple = decoder_start_token_id
        __UpperCamelCase : Dict = use_cache
        __UpperCamelCase : Optional[Any] = max_position_embeddings
        __UpperCamelCase : int = None
        __UpperCamelCase : Optional[int] = decoder_seq_length
        __UpperCamelCase : Optional[int] = 2
        __UpperCamelCase : Optional[int] = 1

    def a_ (self ) -> List[Any]:
        """Random input ids (+ optional attention mask / labels) and a TrOCRConfig."""
        __UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        __UpperCamelCase : int = None
        if self.use_attention_mask:
            __UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        __UpperCamelCase : List[str] = None
        if self.use_labels:
            __UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        __UpperCamelCase : Optional[Any] = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)

    def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> Optional[Any]:
        """Check that cached (past_key_values) decoding matches full re-decoding."""
        __UpperCamelCase : List[Any] = True
        __UpperCamelCase : Optional[Any] = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
        __UpperCamelCase : Optional[Any] = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        __UpperCamelCase : str = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
        __UpperCamelCase : List[Any] = model(_UpperCAmelCase )
        __UpperCamelCase : Optional[int] = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
        self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
        self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
        __UpperCamelCase : List[Any] = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        __UpperCamelCase : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        __UpperCamelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 )
        __UpperCamelCase : Tuple = model(_UpperCAmelCase )["last_hidden_state"]
        __UpperCamelCase : Any = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )["last_hidden_state"]
        # select random slice
        __UpperCamelCase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __UpperCamelCase : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        __UpperCamelCase : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )

    def a_ (self ) -> Optional[Any]:
        """Return (config, inputs_dict) for the common model tests."""
        __UpperCamelCase : List[str] = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = config_and_inputs
        __UpperCamelCase : str = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Common / generation / pipeline tests for the standalone TrOCR decoder.

    NOTE(review): the base-class placeholders and the repeated ``A = ...`` class
    attributes are obfuscation artifacts, and ``TrOCRStandaloneDecoderModelTester``
    used in setUp is not defined under that name in this file (the tester class
    above is mangled to ``A``). Verify against the upstream TrOCR tests.
    """

    A = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    A = (TrOCRForCausalLM,) if is_torch_available() else ()
    A = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    A = True
    A = False

    def a_ (self ) -> List[str]:
        __UpperCamelCase : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
        __UpperCamelCase : Dict = ConfigTester(self , config_class=_UpperCAmelCase )

    def a_ (self ) -> Dict:
        pass

    def a_ (self ) -> Optional[int]:
        pass

    def a_ (self ) -> Optional[Any]:
        pass

    def a_ (self ) -> Dict:
        self.config_tester.run_common_tests()

    def a_ (self ) -> List[Any]:
        # Exercise the cached-decoding equivalence check from the tester above.
        __UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )

    def a_ (self ) -> Any:
        return

    @unittest.skip("The model doesn't support left padding" )  # and it's not used enough to be worth fixing :)
    def a_ (self ) -> Tuple:
        pass
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCAmelCase ( depth , node_index , is_max , scores , height ):
    """Return the optimal value of a complete binary game tree via minimax.

    Args:
        depth: Current depth in the tree (root = 0).
        node_index: Index of the current node within its level.
        is_max: True if the current level is a maximizing level.
        scores: Leaf scores, left to right; ``len(scores)`` must be ``2**height``.
        height: Depth at which the leaves live (may be a float from ``math.log``).

    Returns:
        The minimax-optimal leaf score reachable from this node.

    Raises:
        ValueError: If ``depth`` is negative or ``scores`` is empty.

    Fixes vs. the previous version: all five parameters shared the name
    ``snake_case__`` (a SyntaxError), and the recursive calls referenced an
    undefined ``minimax`` (a ``minimax`` alias is now bound below).
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    # Leaves carry the scores themselves.
    if depth == height:
        return scores[node_index]
    if is_max:
        # Maximizing level: children are minimizing levels.
        return max(
            __lowerCAmelCase(depth + 1 , node_index * 2 , False , scores , height ) ,
            __lowerCAmelCase(depth + 1 , node_index * 2 + 1 , False , scores , height ) ,
        )
    # Minimizing level: children are maximizing levels.
    return min(
        __lowerCAmelCase(depth + 1 , node_index * 2 , True , scores , height ) ,
        __lowerCAmelCase(depth + 1 , node_index * 2 + 1 , True , scores , height ) ,
    )


# Public alias: the recursive calls and main() in this file refer to `minimax`,
# which was previously undefined (NameError at runtime).
minimax = __lowerCAmelCase
def __lowerCAmelCase ( ):
    """Demo driver: compute and print the optimal minimax value for a fixed
    8-leaf game tree (expected result: 65).

    NOTE(review): relies on a module-level ``minimax`` function; in the
    obfuscated original that name was never defined (the implementation above
    is mangled to ``__lowerCAmelCase``), causing a NameError at runtime.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    # Height of the complete binary game tree = log2(number of leaves).
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )


# Fix: the __main__ guard below called `main`, which was never defined.
main = __lowerCAmelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 298
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Fail fast on unsupported fairseq versions: this script depends on the
# checkpoint layout introduced in 0.12.2 and not guaranteed past major v2.
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
    raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
    raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
# NOTE(review): three distinct module constants of the original (the logger, a
# sample sentence, and a sample language code) were collapsed onto the single
# name ``_lowerCAmelCase`` by obfuscation; each assignment overwrites the last.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """Convert a fairseq X-MOD checkpoint to a HuggingFace ``Xmod*`` model.

    Loads the fairseq model, builds an equivalent ``XmodConfig``, copies every
    weight tensor (embeddings, per-layer attention/FFN/adapter weights, and
    either the MNLI classification head or the LM head), verifies both models
    produce near-identical outputs on a sample sentence, then saves the
    converted model.

    NOTE(review): the parameters are name-mangled; from the call site they are
    (xmod_checkpoint_path, pytorch_dump_folder_path, classification_head).
    Many locals (``data_dir``, ``xmod``, ``config``, ...) were likewise
    collapsed onto ``__UpperCamelCase`` by obfuscation, so the names read below
    are unbound as written — compare with the upstream conversion script.
    """
    __UpperCamelCase : Union[str, Any] = Path("data_bin" )
    __UpperCamelCase : Union[str, Any] = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(snake_case__ ).parent ) , checkpoint_file=Path(snake_case__ ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case__ ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case__ ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
    xmod.eval() # disable dropout
    print(snake_case__ )
    __UpperCamelCase : List[str] = xmod.model.encoder.sentence_encoder
    # Build a HF config mirroring the fairseq hyper-parameters.
    __UpperCamelCase : Optional[int] = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:" , snake_case__ )
    __UpperCamelCase : Dict = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
    __UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
    __UpperCamelCase : str = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    __UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
    __UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        __UpperCamelCase : int = model.roberta.encoder.layer[i]
        __UpperCamelCase : Any = xmod_sent_encoder.layers[i]
        # self attention
        __UpperCamelCase : List[str] = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError("Dimensions of self-attention weights do not match." )
        __UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
        __UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
        __UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
        __UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
        __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
        __UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        __UpperCamelCase : Optional[int] = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match." )
        __UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
        __UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
        __UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
        __UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        __UpperCamelCase : Dict = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        # NOTE(review): the original's distinct ``fc1``/``fc2`` attributes appear to
        # have been mangled to the single name ``fca`` here and below, so the
        # intermediate and output copies read the same tensor — confirm upstream.
        __UpperCamelCase : List[Any] = xmod_layer.fca.weight
        __UpperCamelCase : Optional[int] = xmod_layer.fca.bias
        # output
        __UpperCamelCase : List[Any] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        __UpperCamelCase : Tuple = xmod_layer.fca.weight
        __UpperCamelCase : int = xmod_layer.fca.bias
        __UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
        __UpperCamelCase : int = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            __UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
            __UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError("Lists of language adapters do not match." )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            __UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
            __UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
            __UpperCamelCase : int = from_adapter.fca.weight
            __UpperCamelCase : Dict = from_adapter.fca.bias
            __UpperCamelCase : List[Any] = from_adapter.fca.weight
            __UpperCamelCase : int = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        __UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
        __UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        __UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
        __UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
        __UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
        __UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        __UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
        __UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
        __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
        __UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
        __UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
        __UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    __UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(snake_case__ )
    __UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
    if classification_head:
        __UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
    else:
        __UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    # Outputs must agree within a small absolute tolerance before saving.
    __UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
    __UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )
    Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(snake_case__ )
if __name__ == "__main__":
    # NOTE(review): obfuscation collapsed ``parser``/``args`` onto ``_lowerCAmelCase``
    # and the conversion function above is mangled to ``__lowerCAmelCase``, so the
    # names ``parser``, ``args`` and ``convert_xmod_checkpoint_to_pytorch`` used
    # below are unbound as written. Compare with the upstream conversion script.
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    _lowerCAmelCase = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 298
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazily-imported public structure of the ``xmod`` sub-package: the
# configuration is always importable, modeling classes only when torch exists.
# NOTE(review): obfuscation renamed ``_import_structure`` to ``_lowerCAmelCase``
# in the assignments below while the final ``_LazyModule(...)`` call still reads
# ``_import_structure`` — the two names must match for lazy import to work.
_lowerCAmelCase = {
    '''configuration_xmod''': [
        '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XmodConfig''',
        '''XmodOnnxConfig''',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes as well.
    _lowerCAmelCase = [
        '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XmodForCausalLM''',
        '''XmodForMaskedLM''',
        '''XmodForMultipleChoice''',
        '''XmodForQuestionAnswering''',
        '''XmodForSequenceClassification''',
        '''XmodForTokenClassification''',
        '''XmodModel''',
        '''XmodPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # For static type checkers, import everything eagerly.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    _lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
    """Return every variant of the input string with exactly one alphabetic
    character upper-cased, ordered by the position of that character."""
    variants = []
    for idx, ch in enumerate(snake_case__ ):
        # Skip non-letters: only alphabetic positions produce a variant.
        if ch.isalpha():
            variants.append(snake_case__[:idx] + ch.upper() + snake_case__[idx + 1 :] )
    return variants


if __name__ == "__main__":
    __import__('''doctest''').testmod()
| 298
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
# NOTE(review): the distinct module constants of the original file (logger,
# VOCAB_FILES_NAMES, _model_names, PRETRAINED_VOCAB_FILES_MAP, ...) were all
# collapsed onto the single name ``_lowerCAmelCase`` by obfuscation; each
# assignment below overwrites the previous one, and the comprehensions at the
# bottom read ``_model_names``, which is unbound as written.
# Names of the on-disk vocabulary/tokenizer files.
_lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# All released Funnel Transformer size variants.
_lowerCAmelCase = [
    '''small''',
    '''small-base''',
    '''medium''',
    '''medium-base''',
    '''intermediate''',
    '''intermediate-base''',
    '''large''',
    '''large-base''',
    '''xlarge''',
    '''xlarge-base''',
]
# Hub URLs for the vocab and tokenizer files of every checkpoint.
_lowerCAmelCase = {
    '''vocab_file''': {
        '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
        '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
        '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
        '''funnel-transformer/medium-base''': (
            '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/intermediate''': (
            '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/intermediate-base''': (
            '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
        ),
        '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
        '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
        '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
        '''funnel-transformer/xlarge-base''': (
            '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
        '''funnel-transformer/small-base''': (
            '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
        '''funnel-transformer/medium-base''': (
            '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/intermediate''': (
            '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/intermediate-base''': (
            '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
        '''funnel-transformer/large-base''': (
            '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
        ),
        '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
        '''funnel-transformer/xlarge-base''': (
            '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
        ),
    },
}
# Max input length (512 positions) and default init kwargs per checkpoint.
_lowerCAmelCase = {f'funnel-transformer/{name}': 512 for name in _model_names}
_lowerCAmelCase = {f'funnel-transformer/{name}': {'''do_lower_case''': True} for name in _model_names}
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    # Fast Funnel tokenizer (mirrors transformers' FunnelTokenizerFast).
    # NOTE(review): the class attributes were all mangled to `A` — upstream they
    # are vocab_files_names, pretrained_vocab_files_map, pretrained_init_configuration,
    # slow_tokenizer_class, max_model_input_sizes and cls_token_type_id. Confirm.
    A = VOCAB_FILES_NAMES
    A = PRETRAINED_VOCAB_FILES_MAP
    A = PRETRAINED_INIT_CONFIGURATION
    A = FunnelTokenizer
    A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel gives the [CLS] token its own token-type id (2).
    A = 2
    # NOTE(review): every parameter below is bound to one repeated garbled name
    # (`_UpperCAmelCase`) — a SyntaxError — and the body reads `normalizer_state`,
    # `do_lower_case`, `strip_accents`, `tokenize_chinese_chars` and
    # `normalizer_class`, none of which are ever assigned: the `__UpperCamelCase`
    # assignment targets were mangled. Restore the upstream names before use.
    def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<sep>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<cls>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase="##" , **_UpperCAmelCase , ) -> str:
        super().__init__(
            _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , clean_text=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , wordpieces_prefix=_UpperCAmelCase , **_UpperCAmelCase , )
        # Re-sync the backend Rust normalizer with the Python-side options when
        # they disagree with the serialized tokenizer state.
        __UpperCamelCase : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , _UpperCAmelCase ) != do_lower_case
            or normalizer_state.get("strip_accents" , _UpperCAmelCase ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , _UpperCAmelCase ) != tokenize_chinese_chars
        ):
            __UpperCamelCase : int = getattr(_UpperCAmelCase , normalizer_state.pop("type" ) )
            __UpperCamelCase : str = do_lower_case
            __UpperCamelCase : int = strip_accents
            __UpperCamelCase : Optional[int] = tokenize_chinese_chars
            __UpperCamelCase : Optional[int] = normalizer_class(**_UpperCAmelCase )
        __UpperCamelCase : str = do_lower_case
    # NOTE(review): the three methods below are all named `a_` (mangled), so only
    # the last definition survives — upstream they are
    # build_inputs_with_special_tokens, create_token_type_ids_from_sequences and
    # save_vocabulary. Confirm and restore.
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=None ) -> Optional[Any]:
        # [CLS] seq_a [SEP] (seq_b [SEP] when a pair is given)
        __UpperCamelCase : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> List[int]:
        # Token-type ids: cls_token_type_id for [CLS], 0 for the first segment,
        # 1 for the second.
        __UpperCamelCase : Optional[Any] = [self.sep_token_id]
        __UpperCamelCase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None ) -> Tuple[str]:
        # Delegate vocabulary serialization to the backend tokenizer model.
        __UpperCamelCase : str = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
        return tuple(_UpperCAmelCase )
| 298
|
'''simple docstring'''
def __lowerCAmelCase(n, array, target):
    """Count the ordered combinations of elements of ``array`` that sum to ``target``.

    Plain recursion without memoization (exponential time). ``n`` is accepted
    for signature parity with the other implementations but is not used.
    (The original had all three parameters bound to one repeated name, which is
    a SyntaxError; the body reads ``array`` and ``target``.)
    """

    def count_of_possible_combinations(target):
        # Overshot the target: this path contributes nothing.
        if target < 0:
            return 0
        # Exact hit: exactly one valid (empty) continuation.
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def __lowerCAmelCase(n, array, target):
    """Count the ordered combinations of elements of ``array`` that sum to
    ``target`` using top-down recursion with memoization.

    ``n`` is accepted for signature parity with the other implementations but is
    not used. ``dp_array[t]`` caches the count for sub-target ``t`` (-1 = unset).
    (The original had duplicate parameter names — a SyntaxError — and its
    assignment targets were mangled; the names read below restore them.)
    """

    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer  # memoize
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def __lowerCAmelCase(n, array, target):
    """Count the ordered combinations of elements of ``array`` that sum to
    ``target``, bottom-up.

    ``n`` must equal ``len(array)`` (the inner loop indexes ``array[j]`` for
    ``j in range(n)``). ``dp_array[i]`` holds the number of ordered combinations
    summing to ``i``. (The original had duplicate parameter names — a
    SyntaxError — and mangled assignment targets, restored here.)
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: take nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    # Run the module's doctests, then demo the iterative implementation.
    import doctest
    doctest.testmod()
    # NOTE(review): the three values below are all bound to the same name, and
    # the call references `combination_sum_iv`, which is not defined in this
    # file — the identifiers appear garbled (upstream: n = 3, target = 5,
    # array = [1, 2, 5]). Restore distinct names before running.
    _lowerCAmelCase = 3
    _lowerCAmelCase = 5
    _lowerCAmelCase = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 298
| 1
|
'''simple docstring'''
def __lowerCAmelCase(input_str):
    """Compute the Z-array of ``input_str``: ``z[i]`` is the length of the
    longest common prefix of ``input_str`` and ``input_str[i:]``.

    (The original's assignment targets were mangled: ``z_result``,
    ``left_pointer``, ``right_pointer`` and ``min_edge`` were read but never
    bound; they are restored here.)

    NOTE(review): relies on a sibling helper called as ``go_next`` — in this
    file that helper's own name appears garbled; confirm the binding.
    """
    z_result = [0 for i in range(len(input_str))]
    # [left_pointer, right_pointer] is the rightmost-reaching match window.
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        # extend the match character by character
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def __lowerCAmelCase(i, z_result, s):
    """Return True while the prefix match at position ``i`` of string ``s`` can
    be extended by one more character (Z-algorithm helper).

    (The original had all three parameters bound to one repeated name — a
    SyntaxError; the body reads ``i``, ``z_result`` and ``s``.)
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def __lowerCAmelCase(pattern, input_str):
    """Count the occurrences of ``pattern`` inside ``input_str`` using the
    Z-function of ``pattern + input_str``.

    (The original had both parameters bound to one repeated name — a
    SyntaxError — and its assignment targets were mangled; the parameter order
    ``(pattern, input_str)`` follows the upstream implementation — confirm.)

    NOTE(review): calls a sibling ``z_function`` whose own name appears garbled
    in this file; confirm the binding.
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
    # Self-test: run the docstring examples in this module.
    import doctest
    doctest.testmod()
| 298
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# (Fix: the computed path was assigned to a throwaway name while the insert
# below read the undefined `git_repo_path`; bind it under the name that is used.)
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __lowerCAmelCase(snake_case__):
    """Pytest ``pytest_addoption`` hook body: register transformers' shared
    command-line options on the given option parser."""
    # Imported lazily so merely collecting this conftest does not require
    # transformers to be importable.
    from transformers.testing_utils import pytest_addoption_shared as _register_shared_options

    _register_shared_options(snake_case__)
def __lowerCAmelCase(terminalreporter):
    """Pytest ``pytest_terminal_summary`` hook body: write transformers' report
    files when the session was started with ``--make-reports``.

    (Fix: the parameter was garbled to an unrelated name while the body read
    ``terminalreporter``, raising NameError; the mangled local is restored to
    ``make_reports``.)
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 298
| 1
|
'''simple docstring'''
from manim import *
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    # Manim scene animating how a sharded checkpoint is loaded into an empty
    # model (from the accelerate "big model inference" animations).
    # NOTE(review): nearly every local assignment target below was mangled to
    # `__UpperCamelCase`, while later statements read the intended names
    # (`mem`, `cpu`, `gpu`, `model`, `cpu_targs`, `checkpoint`, `key`,
    # `key_text`, `blue_text`, `step_a`, `fill`, `target`, `cpu_target`,
    # `first_animations`, `second_animations`, ...). Restore those bindings
    # before rendering; as written this raises NameError at `mem.copy()`.
    def a_ (self ) -> List[str]:
        # Building blocks: one memory cell rectangle and its fill.
        __UpperCamelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
        __UpperCamelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of 6 cells with a label.
        __UpperCamelCase : int = [mem.copy() for i in range(6 )]
        __UpperCamelCase : Dict = [mem.copy() for i in range(6 )]
        __UpperCamelCase : str = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        __UpperCamelCase : Dict = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        __UpperCamelCase : Union[str, Any] = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        __UpperCamelCase : Dict = Text("CPU" , font_size=2_4 )
        __UpperCamelCase : Dict = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_UpperCAmelCase )
        # GPU: a row of 4 cells with a label.
        __UpperCamelCase : Optional[int] = [mem.copy() for i in range(4 )]
        __UpperCamelCase : str = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        __UpperCamelCase : str = Text("GPU" , font_size=2_4 )
        __UpperCamelCase : Any = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        gpu.move_to([-1, -1, 0] )
        self.add(_UpperCAmelCase )
        # Model: a row of 6 cells with a label.
        __UpperCamelCase : Optional[Any] = [mem.copy() for i in range(6 )]
        __UpperCamelCase : Optional[Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        __UpperCamelCase : int = Text("Model" , font_size=2_4 )
        __UpperCamelCase : Optional[Any] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.add(_UpperCAmelCase )
        # Small yellow targets stacked next to the CPU cells, one per model cell.
        __UpperCamelCase : Union[str, Any] = []
        for i, rect in enumerate(_UpperCAmelCase ):
            rect.set_stroke(_UpperCAmelCase )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            __UpperCamelCase : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_UpperCAmelCase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=_UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=_UpperCAmelCase , buff=0.0 )
            self.add(_UpperCAmelCase )
            cpu_targs.append(_UpperCAmelCase )
        # Checkpoint: a row of 6 cells plus the legend key and captions.
        __UpperCamelCase : Optional[Any] = [mem.copy() for i in range(6 )]
        __UpperCamelCase : Any = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
        __UpperCamelCase : Tuple = Text("Loaded Checkpoint" , font_size=2_4 )
        __UpperCamelCase : int = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , aligned_edge=_UpperCAmelCase , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        __UpperCamelCase : Union[str, Any] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __UpperCamelCase : Dict = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase : Any = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=1_8 , )
        blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        __UpperCamelCase : Any = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_UpperCAmelCase ) , Write(_UpperCAmelCase ) )
        self.play(Write(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) )
        # Animate each checkpoint cell growing, then moving into a CPU slot.
        __UpperCamelCase : Tuple = []
        __UpperCamelCase : Optional[int] = []
        for i, rect in enumerate(_UpperCAmelCase ):
            __UpperCamelCase : Optional[int] = fill.copy().set_fill(_UpperCAmelCase , opacity=0.7 )
            target.move_to(_UpperCAmelCase )
            first_animations.append(GrowFromCenter(_UpperCAmelCase , run_time=1 ) )
            __UpperCamelCase : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) )
        self.play(*_UpperCAmelCase )
        self.play(*_UpperCAmelCase )
        self.wait()
| 298
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class A ( unittest.TestCase ):
    '''simple docstring'''
    # Test fixture for BridgeTowerImageProcessor.
    # NOTE(review): the `__init__` parameters are all bound to one repeated
    # garbled name (`_UpperCAmelCase`) — a SyntaxError — and the assignment
    # targets below were mangled from `self.<attr> = <value>` (e.g.
    # `self.parent = parent`, `self.do_resize = do_resize`, ...). The two
    # methods are both named `a_` (upstream: `prepare_image_processor_dict`
    # and `get_expected_values`), so only the last survives. Restore before use.
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 3_2 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _UpperCAmelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _UpperCAmelCase = True , _UpperCAmelCase=7 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=3 , ) -> Dict:
        __UpperCamelCase : Dict = parent
        __UpperCamelCase : Any = do_resize
        __UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_8_8}
        __UpperCamelCase : Any = size_divisor
        __UpperCamelCase : Optional[int] = do_rescale
        __UpperCamelCase : Union[str, Any] = rescale_factor
        __UpperCamelCase : int = do_normalize
        __UpperCamelCase : List[Any] = do_center_crop
        __UpperCamelCase : Optional[int] = image_mean
        __UpperCamelCase : Tuple = image_std
        __UpperCamelCase : Tuple = do_pad
        __UpperCamelCase : Tuple = batch_size
        __UpperCamelCase : Dict = num_channels
        __UpperCamelCase : Dict = min_resolution
        __UpperCamelCase : Optional[Any] = max_resolution
    def a_ (self ) -> Optional[int]:
        # Kwargs used to construct the image processor under test.
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]:
        # Reproduce the processor's resize math: scale the short side to
        # `size`, cap the long side at 1333/800 * size, then round both down
        # to a multiple of `size_divisor`.
        if not batched:
            __UpperCamelCase : List[str] = self.size["shortest_edge"]
            __UpperCamelCase : Optional[int] = image_inputs[0]
            if isinstance(_UpperCAmelCase , Image.Image ):
                __UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size
            else:
                __UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
            __UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase )
            if h < w:
                __UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w
            else:
                __UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size
            __UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size )
            if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size:
                __UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase )
                __UpperCamelCase : Dict = newh * scale
                __UpperCamelCase : Union[str, Any] = neww * scale
            __UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
            __UpperCamelCase , __UpperCamelCase : Optional[int] = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            # Batched: take the per-image maxima over both dimensions.
            __UpperCamelCase : int = []
            for image in image_inputs:
                __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            __UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
            __UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''
    # Tests for BridgeTowerImageProcessor over PIL, numpy and torch inputs.
    # NOTE(review): all methods are named `a_` (mangled — only the last
    # definition survives; upstream: setUp, the image_processor_dict property
    # and the per-input-type test methods), and several calls reference
    # garbled names (`_UpperCAmelCase` where the freshly-built processor /
    # input was intended). Restore the upstream names before running.
    A = BridgeTowerImageProcessor if is_vision_available() else None
    def a_ (self ) -> Dict:
        __UpperCamelCase : Optional[Any] = BridgeTowerImageProcessingTester(self )
    @property
    def a_ (self ) -> Optional[int]:
        return self.image_processor_tester.prepare_image_processor_dict()
    def a_ (self ) -> Union[str, Any]:
        # The processor must expose all of its configuration attributes.
        __UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "size" ) )
        self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) )
    def a_ (self ) -> List[str]:
        pass
    def a_ (self ) -> List[Any]:
        # Initialize image processor
        __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , Image.Image )
        # Test not batched input
        __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def a_ (self ) -> Tuple:
        # Initialize image processor
        __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , np.ndarray )
        # Test not batched input
        __UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def a_ (self ) -> int:
        # Initialize image processor
        __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
        # Test not batched input
        __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
        __UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 298
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A ( SCREAMING_SNAKE_CASE__ ):
    '''simple docstring'''
    # NOTE(review): `A = 42` looks like a mangled dataclass field — the model
    # below constructs this output as `VQEncoderOutput(latents=...)`, so
    # upstream this declares `latents: torch.FloatTensor`. Confirm and restore.
    A = 42
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    """VQ-VAE model: an ``Encoder``, a vector-quantization bottleneck and a
    ``Decoder`` (mirrors ``diffusers.VQModel``).

    NOTE(review): the original excerpt had every ``__init__`` parameter bound to
    one repeated name (a SyntaxError), ``nn.Convad`` instead of ``nn.Conv2d``,
    mangled ``self.<attr>`` assignment targets, and all three methods named
    ``a_`` (so only the last survived). Names below are restored from the
    upstream ``diffusers`` API — confirm against the project's version.
    """

    @register_to_config
    def __init__(
        self,
        in_channels = 3,
        out_channels = 3,
        down_block_types = ("DownEncoderBlock2D",),
        up_block_types = ("UpDecoderBlock2D",),
        block_out_channels = (64,),
        layers_per_block = 1,
        act_fn = "silu",
        latent_channels = 3,
        sample_size = 32,
        num_vq_embeddings = 256,
        norm_num_groups = 32,
        vq_embed_dim = None,
        scaling_factor = 0.18215,
        norm_type = "group",
    ) -> Dict:
        super().__init__()
        # pass init params to Encoder (no double latents: VQ uses a plain bottleneck)
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )
        # Project latents into (and back out of) the codebook's embedding space.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x, return_dict = True) -> VQEncoderOutput:
        """Encode ``x`` to (not-yet-quantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize = False, return_dict = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize latents ``h`` (unless ``force_not_quantize``) and decode them."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant_proj = self.post_quant_conv(quant)
        # spatial norm decoders additionally condition on the quantized latents
        dec = self.decoder(quant_proj, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full encode -> quantize -> decode round trip over ``sample``."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 298
|
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The conversion below was written against these exact library versions; fail
# fast on anything else.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
# NOTE(review): the logger and the sample sentence are bound to the same garbled
# name (upstream: `logger` and `SAMPLE_TEXT`) — restore distinct names.
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
    """Convert an official (MXNet/GluonNLP) Bort checkpoint into a Hugging Face
    PyTorch ``BertForMaskedLM`` checkpoint, then verify both models produce the
    same outputs on a sample sentence.

    NOTE(review): both parameters are bound to the same garbled name — a
    SyntaxError (upstream: ``bort_checkpoint_path`` and
    ``pytorch_dump_folder_path``) — and many local assignment targets below
    were mangled to ``__UpperCamelCase`` while later statements read the
    intended names (``predefined_args``, ``encoder``, ``vocab_name``, ...).
    Restore the upstream bindings before running.
    """
    __UpperCamelCase : List[Any] = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1E-5,
        "token_type_vocab_size": 2,
    }
    __UpperCamelCase : Optional[int] = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    __UpperCamelCase : Any = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=snake_case__ , output_all_encodings=snake_case__ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case__ ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    __UpperCamelCase : str = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    __UpperCamelCase : Tuple = os.path.join(get_home_dir() , "models" )
    __UpperCamelCase : Union[str, Any] = _load_vocab(snake_case__ , snake_case__ , snake_case__ , cls=snake_case__ )
    __UpperCamelCase : Union[str, Any] = nlp.model.BERTModel(
        snake_case__ , len(snake_case__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case__ , use_token_type_embed=snake_case__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case__ , use_decoder=snake_case__ , )
    original_bort.load_parameters(snake_case__ , cast_dtype=snake_case__ , ignore_extra=snake_case__ )
    __UpperCamelCase : int = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    __UpperCamelCase : Any = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(snake_case__ ),
    }
    __UpperCamelCase : List[str] = BertConfig.from_dict(snake_case__ )
    __UpperCamelCase : str = BertForMaskedLM(snake_case__ )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    # NOTE(review): parameter garbled — the body reads `mx_array`.
    def to_torch(snake_case__ ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    # NOTE(review): duplicate parameter names (SyntaxError) — the body reads
    # `hf_param` and `gluon_param`.
    def check_and_map_params(snake_case__ , snake_case__ ):
        __UpperCamelCase : Any = hf_param.shape
        __UpperCamelCase : List[Any] = to_torch(params[gluon_param] )
        __UpperCamelCase : Union[str, Any] = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    # Embeddings.
    __UpperCamelCase : Tuple = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    __UpperCamelCase : str = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    __UpperCamelCase : Optional[int] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    __UpperCamelCase : str = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    __UpperCamelCase : Any = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    # Per-layer transfer of attention, intermediate and output weights.
    for i in range(hf_bort_config.num_hidden_layers ):
        __UpperCamelCase : BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        __UpperCamelCase : BertSelfAttention = layer.attention.self
        __UpperCamelCase : int = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        __UpperCamelCase : List[str] = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        __UpperCamelCase : str = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        __UpperCamelCase : List[str] = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        __UpperCamelCase : Tuple = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        __UpperCamelCase : BertSelfOutput = layer.attention.output
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        __UpperCamelCase : Optional[int] = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        __UpperCamelCase : BertIntermediate = layer.intermediate
        __UpperCamelCase : Dict = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        __UpperCamelCase : List[Any] = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        __UpperCamelCase : BertOutput = layer.output
        __UpperCamelCase : Dict = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        __UpperCamelCase : Union[str, Any] = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        __UpperCamelCase : List[str] = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        __UpperCamelCase : int = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    __UpperCamelCase : Any = RobertaTokenizer.from_pretrained("roberta-base" )
    __UpperCamelCase : int = tokenizer.encode_plus(snake_case__ )["input_ids"]
    # Get gluon output
    __UpperCamelCase : Dict = mx.nd.array([input_ids] )
    __UpperCamelCase : Any = original_bort(inputs=snake_case__ , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(snake_case__ )
    __UpperCamelCase : Optional[Any] = BertModel.from_pretrained(snake_case__ )
    hf_bort_model.eval()
    __UpperCamelCase : str = tokenizer.encode_plus(snake_case__ , return_tensors="pt" )
    __UpperCamelCase : Dict = hf_bort_model(**snake_case__ )[0]
    __UpperCamelCase : List[Any] = output_gluon[0].asnumpy()
    __UpperCamelCase : Optional[int] = output_hf[0].detach().numpy()
    # Element-wise comparison within a 1e-3 absolute tolerance.
    __UpperCamelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    __UpperCamelCase : List[Any] = np.allclose(snake_case__ , snake_case__ , atol=1E-3 )
    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , snake_case__ )
if __name__ == "__main__":
    # CLI entry point for the Bort conversion.
    # NOTE(review): the parser and parsed args are both bound to the same
    # garbled name while the calls below read `parser` and `args`, and
    # `convert_bort_checkpoint_to_pytorch` is not defined under that name in
    # this file — restore the upstream identifiers before running.
    _lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    _lowerCAmelCase = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 298
| 1
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCAmelCase ( snake_case__ ):
    """Freeze *snake_case__* in place: disable gradient tracking on every parameter.

    Args:
        snake_case__: a ``torch.nn.Module`` whose parameters should be frozen.
    """
    # Bug fix: the original iterated ``module.parameters()`` (``module`` is not
    # defined — the parameter is named ``snake_case__``) and bound ``False`` to a
    # throwaway local, so nothing was ever frozen.
    for param in snake_case__.parameters():
        param.requires_grad = False
def __lowerCAmelCase ( ):
    """Return the best available torch device string: "cuda", "mps" or "cpu"."""
    # Bug fix: the original bound the choice to a throwaway local and then read
    # an undefined name ``device`` (NameError at runtime).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def __lowerCAmelCase ( snake_case__ ):
    """Display *snake_case__* (image array / PIL image) with the axes hidden."""
    # Bug fix: the imshow handle was bound to a throwaway local while ``fig`` was
    # read (NameError), and the *image itself* (truthy) was passed to
    # ``set_visible`` — the axes were never hidden. Pass False instead.
    fig = plt.imshow(snake_case__ )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def __lowerCAmelCase ( ):
    """Return the current wall-clock time formatted as ``HH:MM:SS``."""
    # Bug fix: both locals were bound to throwaway names while the code read the
    # undefined ``current_time`` / ``timestamp`` (NameError).
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 298
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class A ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder emitting flat ``{"content": str}`` examples for tests.

    NOTE(review): all three methods carry the obfuscated name ``a_`` (so earlier
    defs are shadowed) — presumably ``_info`` / ``_split_generators`` /
    ``_build_pcollection`` originally; confirm against the datasets API.
    """
    def a_ (self ):
        # supervised_keys previously read an undefined name (NameError); None is
        # the only sensible value for this fixture. The ``-> Tuple`` annotation
        # referenced a name not imported in this file and was dropped.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def a_ (self , dl_manager , pipeline ):
        # Fix: the original declared two parameters with the same name (a
        # SyntaxError); restored to the (dl_manager, pipeline) pair.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def a_ (self , pipeline , examples ):
        # Feed the in-memory examples into the Beam pipeline.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class A ( datasets.BeamBasedBuilder ):
    """Beam-based builder emitting nested ``{"a": {"b": [str]}}`` examples for tests.

    NOTE(review): all three methods carry the obfuscated name ``a_`` (so earlier
    defs are shadowed) — presumably ``_info`` / ``_split_generators`` /
    ``_build_pcollection`` originally; confirm against the datasets API.
    """
    def a_ (self ):
        # supervised_keys previously read an undefined name (NameError); None is
        # the only sensible value for this fixture. Broken return annotation dropped.
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def a_ (self , dl_manager , pipeline ):
        # Fix: duplicate parameter names (a SyntaxError) restored to the
        # (dl_manager, pipeline) pair.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def a_ (self , pipeline , examples ):
        # Feed the in-memory examples into the Beam pipeline.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def __lowerCAmelCase ( ):
    """Return the dummy (index, {"content": str}) example pairs used by the tests."""
    examples = []
    for index, text in enumerate(["foo", "bar", "foobar"] ):
        examples.append((index, {"content": text}) )
    return examples
def __lowerCAmelCase ( ):
    """Return the nested (index, {"a": {"b": [str]}}) example pairs used by the tests."""
    examples = []
    for index, text in enumerate(["foo", "bar", "foobar"] ):
        examples.append((index, {"a": {"b": [text]}}) )
    return examples
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end tests for the Beam-based builders defined above.

    NOTE(review): obfuscation artifacts — the base name ``SCREAMING_SNAKE_CASE__``
    is undefined (presumably ``TestCase``), and results are bound to throwaway
    names while the code reads ``builder``/``dset``/``expected_num_examples``/
    ``tmp_cache_dir`` (unbound). ``DummyBeamDataset``/``NestedBeamDataset`` are
    also not defined in this file (both classes were renamed ``A``). Confirm
    against the original test module before running.
    """
    @require_beam
    def a_ (self ) -> Union[str, Any]:
        # Build the flat dummy dataset with the DirectRunner and check the
        # produced arrow file, features, rows and dataset_info.json.
        __UpperCamelCase : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : str = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            __UpperCamelCase : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def a_ (self ) -> Optional[Any]:
        # Same build but with WriteToParquet patched to force 2 shards; sharding
        # does not preserve order, so only the element set is compared.
        import apache_beam as beam
        __UpperCamelCase : Optional[int] = beam.io.parquetio.WriteToParquet
        __UpperCamelCase : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : Optional[int] = DummyBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                __UpperCamelCase : List[str] = partial(_UpperCAmelCase , num_shards=2 )
                builder.download_and_prepare()
            # NOTE(review): both existence checks name shard 00000-of-00002 —
            # the second was presumably meant to check shard 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        _UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        _UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            __UpperCamelCase : List[str] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def a_ (self ) -> str:
        # Without a beam_runner, download_and_prepare must raise MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : Optional[Any] = DummyBeamDataset(cache_dir=_UpperCAmelCase )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def a_ (self ) -> List[str]:
        # Same end-to-end check for the nested-features builder.
        __UpperCamelCase : Tuple = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            __UpperCamelCase : str = NestedBeamDataset(cache_dir=_UpperCAmelCase , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            __UpperCamelCase : Union[str, Any] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , _UpperCAmelCase )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , _UpperCAmelCase )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(_UpperCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 298
| 1
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
# NOTE(review): obfuscation bound every constant below to the single name
# ``_lowerCAmelCase`` (each assignment shadows the previous one), while the
# functions that follow read the original names (``PATH_TO_TRANSFORMERS``,
# ``_re_backend``, ``_re_test_backend``, ``_re_import``, ``_re_try``,
# ``_re_else`` ...). The comments preserved below document which regex each
# line was meant to be.
_lowerCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
_lowerCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowerCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowerCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowerCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowerCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowerCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowerCAmelCase = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowerCAmelCase = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowerCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowerCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowerCAmelCase = re.compile(R'''^\s*else:''')
def __lowerCAmelCase ( snake_case__ ):
    """Return the sorted "_and_"-joined backends named on an
    ``if not is_xxx_available()`` line, or None when the line is not one.

    NOTE(review): ``_re_test_backend`` / ``_re_backend`` are not defined in this
    file (the module constants were all renamed); confirm against the original.
    """
    if _re_test_backend.search(snake_case__ ) is None:
        return None
    # Bug fix: the match list was bound to a throwaway local while ``backends``
    # was read (NameError), and the *input line* — not the backend list — was
    # being joined. b[0] is the first regex group (the backend name).
    backends = [b[0] for b in _re_backend.findall(snake_case__ )]
    backends.sort()
    return "_and_".join(backends )
def __lowerCAmelCase ( snake_case__ ):
    """Parse a transformers ``__init__.py`` and return the objects declared in
    ``_import_structure`` and in the ``TYPE_CHECKING`` block, each as a dict
    mapping backend name ("none" for backend-free) to a list of object names.

    NOTE(review): obfuscation bound every result to a throwaway name while the
    logic reads ``lines``/``line_index``/``line``/``objects``/``backend``/
    ``imports``/``single_line_import_search``/``import_dict_objects``/
    ``type_hint_objects`` (all unbound here), and the ``_re_*`` regex constants
    it uses are not defined in this file. Structure/indentation reconstructed
    from syntax; confirm against the original ``utils/check_inits.py``.
    """
    with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
        __UpperCamelCase : List[Any] = f.readlines()
    __UpperCamelCase : Optional[int] = 0
    # Skip ahead to the _import_structure declaration.
    while line_index < len(snake_case__ ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(snake_case__ ):
        return None
    # First grab the objects without a specific backend in _import_structure
    __UpperCamelCase : Any = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        __UpperCamelCase : Tuple = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(snake_case__ ):
            __UpperCamelCase : int = _re_one_line_import_struct.search(snake_case__ ).groups()[0]
            __UpperCamelCase : List[str] = re.findall(r"\[([^\]]+)\]" , snake_case__ )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        __UpperCamelCase : int = _re_import_struct_key_value.search(snake_case__ )
        if single_line_import_search is not None:
            __UpperCamelCase : Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(snake_case__ ) > 0]
            objects.extend(snake_case__ )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    __UpperCamelCase : List[str] = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        __UpperCamelCase : int = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __UpperCamelCase : int = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            __UpperCamelCase : Any = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                __UpperCamelCase : Tuple = lines[line_index]
                if _re_import_struct_add_one.search(snake_case__ ) is not None:
                    objects.append(_re_import_struct_add_one.search(snake_case__ ).groups()[0] )
                elif _re_import_struct_add_many.search(snake_case__ ) is not None:
                    __UpperCamelCase : List[Any] = _re_import_struct_add_many.search(snake_case__ ).groups()[0].split(", " )
                    __UpperCamelCase : Optional[int] = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0]
                    objects.extend(snake_case__ )
                elif _re_between_brackets.search(snake_case__ ) is not None:
                    __UpperCamelCase : str = _re_between_brackets.search(snake_case__ ).groups()[0].split(", " )
                    __UpperCamelCase : Optional[Any] = [obj[1:-1] for obj in imports if len(snake_case__ ) > 0]
                    objects.extend(snake_case__ )
                elif _re_quote_object.search(snake_case__ ) is not None:
                    objects.append(_re_quote_object.search(snake_case__ ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            __UpperCamelCase : Tuple = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    __UpperCamelCase : List[Any] = []
    while (
        line_index < len(snake_case__ )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        __UpperCamelCase : Union[str, Any] = lines[line_index]
        __UpperCamelCase : Any = _re_import.search(snake_case__ )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    __UpperCamelCase : Tuple = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(snake_case__ ):
        # If the line is an if is_backend_available, we grab all objects associated.
        __UpperCamelCase : str = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __UpperCamelCase : Dict = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            __UpperCamelCase : int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                __UpperCamelCase : Dict = lines[line_index]
                __UpperCamelCase : List[Any] = _re_import.search(snake_case__ )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            __UpperCamelCase : List[Any] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def __lowerCAmelCase ( import_dict_objects , type_hint_objects ):
    """Compare the ``_import_structure`` half of an init with its TYPE_CHECKING
    half and return a list of human-readable error strings (empty when the two
    sides agree).

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) — the names the body already used are restored — and bound
    every intermediate to a throwaway name while reading ``errors`` /
    ``duplicate_imports`` / ``duplicate_type_hints`` / ``name`` (NameError).
    """
    def find_duplicates(snake_case__ ):
        # Names listed more than once on one side.
        return [k for k, v in collections.Counter(snake_case__ ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        # Compare as sets so ordering differences alone are not errors.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else F"{key} backend"
            errors.append(F"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
def __lowerCAmelCase ( ):
    """Walk the transformers source tree, parse every ``__init__.py`` and raise
    ValueError listing all files whose two init halves disagree.

    NOTE(review): obfuscation artifacts — ``snake_case__`` is read but this
    function takes no parameters (presumably ``PATH_TO_TRANSFORMERS``), results
    are bound to throwaway names while ``failures``/``objects``/``fname``/
    ``errors`` are read, and ``parse_init``/``analyze_results`` are not defined
    under those names in this file. Confirm against the original script.
    """
    __UpperCamelCase : Dict = []
    for root, _, files in os.walk(snake_case__ ):
        if "__init__.py" in files:
            __UpperCamelCase : int = os.path.join(snake_case__ , "__init__.py" )
            __UpperCamelCase : List[str] = parse_init(snake_case__ )
            if objects is not None:
                __UpperCamelCase : str = analyze_results(*snake_case__ )
                if len(snake_case__ ) > 0:
                    __UpperCamelCase : List[str] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(snake_case__ ) )
    if len(snake_case__ ) > 0:
        raise ValueError("\n\n".join(snake_case__ ) )
def __lowerCAmelCase ( ):
    """Collect the dotted names of all transformers submodules (package folders
    plus top-level single-file modules), skipping private folders and empty
    leftovers.

    NOTE(review): ``snake_case__`` is read but no parameter exists (presumably
    the transformers source path), and results are bound to throwaway names
    while ``submodules``/``short_path``/``submodule`` are read — unbound here.
    """
    __UpperCamelCase : Tuple = []
    for path, directories, files in os.walk(snake_case__ ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(snake_case__ )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(snake_case__ ) / folder).glob("*.py" ) ) ) == 0:
                continue
            __UpperCamelCase : Dict = str((Path(snake_case__ ) / folder).relative_to(snake_case__ ) )
            __UpperCamelCase : Tuple = short_path.replace(os.path.sep , "." )
            submodules.append(snake_case__ )
        for fname in files:
            if fname == "__init__.py":
                continue
            __UpperCamelCase : Optional[int] = str((Path(snake_case__ ) / fname).relative_to(snake_case__ ) )
            __UpperCamelCase : Any = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            # Only keep top-level modules (no dots after stripping the suffix).
            if len(submodule.split("." ) ) == 1:
                submodules.append(snake_case__ )
    return submodules
# Submodules that intentionally bypass `_import_structure` registration.
# NOTE(review): the check below reads ``IGNORE_SUBMODULES`` — this list is
# bound to ``_lowerCAmelCase`` instead, presumably a renaming artifact.
_lowerCAmelCase = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
    '''models.esm.openfold_utils''',
]
def __lowerCAmelCase ( ):
    """Verify every transformers submodule appears as a key of
    ``_import_structure`` in the main init; raise ValueError listing strays.

    NOTE(review): ``snake_case__`` / ``transformers`` / ``import_structure_keys``
    / ``module_not_registered`` / ``list_of_modules`` are read while results are
    bound to throwaway names — unbound here; confirm against the original.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import
    __UpperCamelCase : Optional[Any] = direct_transformers_import(snake_case__ )
    __UpperCamelCase : List[str] = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(snake_case__ , "__init__.py" ) , "r" ) as f:
        __UpperCamelCase : Dict = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]" , snake_case__ ) ) )
    __UpperCamelCase : List[str] = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(snake_case__ ) > 0:
        __UpperCamelCase : int = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
    # Run both consistency checks when invoked as a script.
    # NOTE(review): these call sites use the original function names; in this
    # file the definitions were renamed to ``__lowerCAmelCase``, so the names
    # are unresolved — confirm against the original script.
    check_all_inits()
    check_submodules()
| 298
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCAmelCase ( snake_case__=None ):
    """Build (or attach as a subparser) the argparse parser for the
    ``accelerate test`` command and return it.

    NOTE(review): the body reads ``subparsers`` and ``parser`` while the
    parameter is named ``snake_case__`` and the parser is bound to a throwaway
    name — both unbound here; presumably renaming artifacts. The
    ``func=snake_case__`` default also looks wrong (was presumably the command
    callback) — confirm against the original module.
    """
    if subparsers is not None:
        __UpperCamelCase : Any = subparsers.add_parser("test" )
    else:
        __UpperCamelCase : Dict = argparse.ArgumentParser("Accelerate test command" )
    parser.add_argument(
        "--config_file" , default=snake_case__ , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=snake_case__ )
    return parser
def __lowerCAmelCase ( snake_case__ ):
    """Launch the bundled accelerate test script via ``accelerate-launch`` and
    report success when the subprocess exits cleanly.

    NOTE(review): the body reads ``args``/``script_name``/``test_args``/
    ``result`` while the parameter is named ``snake_case__`` and every result is
    bound to a throwaway name — all unbound here; renaming artifacts to confirm.
    """
    __UpperCamelCase : str = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
    if args.config_file is None:
        __UpperCamelCase : str = script_name
    else:
        __UpperCamelCase : Tuple = F"--config_file={args.config_file} {script_name}"
    __UpperCamelCase : Optional[Any] = ["accelerate-launch"] + test_args.split()
    __UpperCamelCase : Optional[Any] = execute_subprocess_async(snake_case__ , env=os.environ.copy() )
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )
def __lowerCAmelCase ( ):
    """Entry point: parse CLI args and run the test command.

    NOTE(review): ``test_command_parser``/``parser``/``test_command``/``main``
    are read but not defined under those names in this file (the definitions
    above were renamed) — renaming artifacts to confirm.
    """
    __UpperCamelCase : int = test_command_parser()
    __UpperCamelCase : Union[str, Any] = parser.parse_args()
    test_command(snake_case__ )
if __name__ == "__main__":
    main()
| 298
| 1
|
'''simple docstring'''
import random
from typing import Any
def __lowerCAmelCase ( data ):
    """Randomly shuffle *data* in place and return it.

    Fixes: the parameter was renamed away from ``data`` while the body still
    read ``data`` (NameError), and the two random indices plus the swap result
    were bound to throwaway names — so ``a``/``b`` were undefined and no swap
    ever happened.

    Note: this swaps two uniformly random positions once per element rather
    than the textbook descending-index Fisher-Yates walk, matching the
    original's structure.
    """
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings.
    # Fix: both sequences were bound to ``_lowerCAmelCase`` while the prints
    # read ``integers``/``strings`` (NameError), and the call named
    # ``fisher_yates_shuffle`` which does not exist in this file — the shuffle
    # defined above is ``__lowerCAmelCase``.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', __lowerCAmelCase(integers), __lowerCAmelCase(strings))
| 298
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Tokenizer tests for BlenderbotSmallTokenizer (tiny fixture vocab plus
    checks against the pretrained facebook/blenderbot-90M checkpoint).

    NOTE(review): obfuscation artifacts — the mixin base
    ``SCREAMING_SNAKE_CASE__`` is undefined (presumably
    ``TokenizerTesterMixin``), both class attributes are named ``A`` (presumably
    ``tokenizer_class`` / ``test_rust_tokenizer``), every method is named ``a_``
    (later defs shadow earlier), and locals are bound to throwaway names while
    ``self.vocab_file``/``_UpperCAmelCase``/``tokenizer``/``tok``/``src_text``/
    ``decoded``/``encoded``/``encoded_dot`` are read — unbound here. Confirm
    against the original test module.
    """
    A = BlenderbotSmallTokenizer
    A = False
    def a_ (self ) -> List[str]:
        # setUp: write a tiny vocab/merges fixture into tmpdirname.
        super().setUp()
        __UpperCamelCase : Optional[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        __UpperCamelCase : int = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
        __UpperCamelCase : Any = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        __UpperCamelCase : int = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        __UpperCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __UpperCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_UpperCAmelCase ) )
    def a_ (self , **_UpperCAmelCase ) -> Dict:
        # Tokenizer factory for the mixin: load from the fixture directory.
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
    def a_ (self , _UpperCAmelCase ) -> str:
        # Input/output text pair for the mixin round-trip checks.
        __UpperCamelCase : List[Any] = "adapt act apte"
        __UpperCamelCase : Dict = "adapt act apte"
        return input_text, output_text
    def a_ (self ) -> int:
        # Full tokenize + convert_tokens_to_ids check on the fixture vocab.
        __UpperCamelCase : List[str] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __UpperCamelCase : str = "adapt act apte"
        __UpperCamelCase : List[str] = ["adapt", "act", "ap@@", "te"]
        __UpperCamelCase : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        __UpperCamelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        __UpperCamelCase : Any = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
    def a_ (self ) -> int:
        # Encode/decode round trip against the pretrained 90M checkpoint
        # (lowercases and spaces out punctuation, hence the inequality).
        __UpperCamelCase : Optional[int] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1_3_8_4]
        __UpperCamelCase : Dict = "I am a small frog."
        __UpperCamelCase : Any = tok([src_text] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def a_ (self ) -> List[Any]:
        # A trailing "." must encode to the same id as a lone ".".
        __UpperCamelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        __UpperCamelCase : Tuple = "I am a small frog ."
        __UpperCamelCase : List[str] = "."
        __UpperCamelCase : Any = tok(_UpperCAmelCase )["input_ids"]
        __UpperCamelCase : Optional[Any] = tok(_UpperCAmelCase )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 298
| 1
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __lowerCAmelCase ( snake_case__ ):
    # While the fixture is active, both the "mock" test filesystem and the
    # built-in "bz2" compression filesystem must be registered with fsspec.
    # NOTE(review): ``snake_case__`` is presumably the ``mockfs`` pytest fixture
    # that performs the registration — confirm against conftest.
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def __lowerCAmelCase ( ):
    # Without the fixture, "mock" must not be registered while the built-in
    # "bz2" compression filesystem persists.
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def __lowerCAmelCase ( ):
    """Check extract_path_from_uri strips the s3:// scheme and leaves plain
    local paths untouched.

    NOTE(review): locals are bound to throwaway names while ``mock_bucket``/
    ``dataset_path``/``new_dataset_path``/``snake_case__`` are read — unbound
    here; renaming artifacts to confirm.
    """
    __UpperCamelCase : Tuple = "mock-s3-bucket"
    __UpperCamelCase : Optional[int] = F"s3://{mock_bucket}"
    __UpperCamelCase : Dict = extract_path_from_uri(snake_case__ )
    assert dataset_path.startswith("s3://" ) is False
    __UpperCamelCase : Any = "./local/path"
    __UpperCamelCase : Any = extract_path_from_uri(snake_case__ )
    assert dataset_path == new_dataset_path
def __lowerCAmelCase ( snake_case__ ):
    """is_remote_filesystem must be True for the fixture fs and False for the
    local "file" filesystem.

    NOTE(review): results are bound to throwaway names while ``is_remote`` is
    read — unbound here; renaming artifact to confirm.
    """
    __UpperCamelCase : Optional[int] = is_remote_filesystem(snake_case__ )
    assert is_remote is True
    __UpperCamelCase : Optional[int] = fsspec.filesystem("file" )
    __UpperCamelCase : Optional[Any] = is_remote_filesystem(snake_case__ )
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    """For each compression filesystem, opening a compressed fixture must expose
    exactly one member whose decompressed content matches the reference file.

    NOTE(review): the signature declares seven parameters with the same name —
    a SyntaxError (presumably compression_fs_class plus the gz/bz2/lz4/zstd/xz
    fixture paths); the parametrize argument and the locals read
    (``input_paths``/``input_path``/``reason``/``fs``/``expected_filename``)
    are likewise unbound renaming artifacts. Confirm against the original.
    """
    __UpperCamelCase : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    __UpperCamelCase : Optional[Any] = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Skip protocols whose optional dependency is missing.
        __UpperCamelCase : Tuple = F"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(snake_case__ )
    __UpperCamelCase : int = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case__ )
    assert isinstance(snake_case__ , snake_case__ )
    __UpperCamelCase : int = os.path.basename(snake_case__ )
    __UpperCamelCase : Union[str, Any] = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(snake_case__ , "r" , encoding="utf-8" ) as f, open(snake_case__ , encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
    """A chained ``proto://member::archive`` URI must resolve to an existing
    member file, and a non-existent member must not be reported as a file.

    NOTE(review): the signature declares three parameters with the same name —
    a SyntaxError (presumably protocol plus the zip/gzip fixture paths);
    ``compressed_file_paths``/``member_file_path``/``fs`` etc. are unbound
    renaming artifacts. Confirm against the original.
    """
    __UpperCamelCase : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    __UpperCamelCase : str = compressed_file_paths[protocol]
    __UpperCamelCase : Dict = "dataset.jsonl"
    __UpperCamelCase : str = F"{protocol}://{member_file_path}::{compressed_file_path}"
    __UpperCamelCase , *__UpperCamelCase : Any = fsspec.get_fs_token_paths(snake_case__ )
    assert fs.isfile(snake_case__ )
    assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
    """HfFileSystem over a hub dataset repo must list/locate the expected
    entries and read a data file matching the local reference copy.

    NOTE(review): the signature declares four parameters with the same name —
    a SyntaxError (presumably hf_api / repo id / token / local text file);
    ``hffs`` is an unbound renaming artifact. Confirm against the original.
    """
    __UpperCamelCase : List[str] = hf_api.dataset_info(snake_case__ , token=snake_case__ )
    __UpperCamelCase : List[str] = HfFileSystem(repo_info=snake_case__ , token=snake_case__ )
    assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
    assert hffs.isdir("data" )
    assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
    with open(snake_case__ ) as f:
        assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def __lowerCAmelCase ( ):
    """Re-registering an already-registered protocol and reloading
    datasets.filesystems must emit exactly one overwrite warning.

    NOTE(review): the protocol name is bound to a throwaway local while
    ``protocol`` is read, and ``register_implementation``/``pytest.warns`` are
    called with the undefined ``snake_case__`` (presumably protocol / fs class /
    True / UserWarning) — renaming artifacts to confirm.
    """
    __UpperCamelCase : List[Any] = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(snake_case__ , snake_case__ , clobber=snake_case__ )
    with pytest.warns(snake_case__ ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(snake_case__ ) == 1
    assert (
        str(warning_info[0].message )
        == F"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 298
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = '''RegNetConfig'''
# Base docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = '''tabby, tabby cat'''
_lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
    """RegNet convolution block: explicit zero-padding, grouped conv, batch
    norm, then activation (identity when ``activation`` is None).

    Fixes: the original ``__init__`` declared five parameters with the same
    name (a SyntaxError) — names restored from the keyword call sites — and
    bound the sublayers to throwaway locals while the call method reads
    ``self.padding``/``self.convolution``/``self.normalization``/
    ``self.activation``. The ``ZeroPaddingaD``/``ConvaD`` attribute names were
    digit-mangled (no such keras layers) and are restored to the 2D layers.
    """
    def __init__(self , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , **kwargs ):
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding="VALID" , groups=groups , use_bias=False , name="convolution" , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
        self.activation = ACTaFN[activation] if activation is not None else tf.identity
    def a_ (self , hidden_state ):
        # pad -> conv -> batch norm -> activation
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class A ( tf.keras.layers.Layer ):
    """RegNet embedding stem: checks the channel count, converts NCHW input to
    NHWC, and applies the stride-2 embedder conv layer.

    NOTE(review): obfuscation artifacts — ``__init__`` reuses one name for the
    positional parameter and ``**kwargs`` (a SyntaxError; presumably
    ``config``), and attributes are bound to throwaway locals while
    ``self.num_channels``/``self.embedder``/``num_channels``/``hidden_state``
    are read — unbound here. Confirm against the original model file.
    """
    def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : Any = config.num_channels
        __UpperCamelCase : str = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # Validate that the input's channel axis matches the configuration.
        __UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        __UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
        __UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
        return hidden_state
class A ( tf.keras.layers.Layer ):
    """RegNet shortcut branch: 1x1 conv (stride ``stride``) followed by batch
    norm, with no activation.

    Fixes: the original declared duplicate parameter names in both methods (a
    SyntaxError) — restored from the call sites — bound the sublayers to
    throwaway locals while reading ``self.convolution``/``self.normalization``,
    and passed an undefined name as ``use_bias`` (restored to False). The
    digit-mangled ``ConvaD`` attribute is restored to ``Conv2D``.
    """
    def __init__(self , out_channels , stride = 2 , **kwargs ):
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name="convolution" )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
    def a_ (self , hidden_state , training = False ) -> tf.Tensor:
        # Batch norm respects the training flag; the conv itself does not need it.
        return self.normalization(self.convolution(hidden_state ) , training=training )
class A ( tf.keras.layers.Layer ):
    """RegNet squeeze-and-excitation attention: global-average pool, a
    relu-then-sigmoid 1x1-conv bottleneck, then channel-wise rescaling.

    NOTE(review): obfuscation artifacts — ``__init__`` declares duplicate
    parameter names (a SyntaxError; presumably in_channels/reduced_channels),
    ``GlobalAveragePoolingaD``/``ConvaD`` are digit-mangled layer names, and
    attributes/locals are bound to throwaway names while ``self.pooler``/
    ``self.attention``/``pooled``/``hidden_state`` are read — unbound here.
    Confirm against the original model file.
    """
    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
        super().__init__(**_UpperCAmelCase )
        __UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
        __UpperCamelCase : Optional[Any] = [
            tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]
    def a_ (self , _UpperCAmelCase ) -> Tuple:
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        __UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
        for layer_module in self.attention:
            __UpperCamelCase : str = layer_module(_UpperCAmelCase )
        # Rescale the input channel-wise by the learned attention weights.
        __UpperCamelCase : List[Any] = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's "X" residual block: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand with an
    identity (or projected) shortcut — a ResNeXt-style bottleneck.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # No activation before the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        # NOTE(review): `ACTaFN` looks like a mangled `ACT2FN`; kept as the file defines it.
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's "Y" residual block: an X block with a Squeeze-and-Excitation layer
    inserted before the final 1x1 expansion.
    """

    def __init__(self, config, in_channels, out_channels, stride=1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # Squeeze-excite on the bottleneck features; reduction ratio derived from in_channels.
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # No activation before the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        # NOTE(review): `ACTaFN` looks like a mangled `ACT2FN`; kept as the file defines it.
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` X- or Y-layers; only the first may downsample (`stride`)."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects the hidden state before each stage."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        """Run all stages; `hidden_states` gathers each stage's input plus the final output."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Core RegNet network: embedder (stem) -> encoder (stages) -> global-average pooler."""

    # `keras_serializable` requires the layer's config class under this attribute name
    # (the mangled original bound it to a meaningless `A`).
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        # keepdims=True keeps the pooled output 4D: (batch, 1, 1, channels) in NHWC.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel ( SCREAMING_SNAKE_CASE__ ):
    """
    Base class hooking RegNet into the library's weight-init / pretrained-loading machinery.

    NOTE(review): the base `SCREAMING_SNAKE_CASE__` is presumably `TFPreTrainedModel`,
    defined elsewhere in this file — confirm. The mangled original collapsed the three
    class attributes below into a single repeatedly-overwritten `A`.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Serving signature: NCHW float32 images at 224x224
        # (the original's `tf.floataa` is not a real TF attribute).
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# NOTE(review): both docstring templates below were mangled onto the SAME name
# `_lowerCAmelCase`, so the second assignment overwrites the first; upstream these
# are two distinct constants (a START and an INPUTS docstring). Flagged, not changed.
_lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
# Per-argument documentation for the model forward pass.
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class TFRegNetModel( SCREAMING_SNAKE_CASE__ ):
    """
    Bare RegNet backbone (no task head).

    NOTE(review): base/docstring constant `SCREAMING_SNAKE_CASE__` is presumably the
    pretrained-model base class / START docstring defined elsewhere in this file.
    """

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_lowerCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        """Run the backbone; returns last hidden state, pooled output and optional hidden states."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class TFRegNetForImageClassification( SCREAMING_SNAKE_CASE__ ):
    """
    RegNet backbone + linear classification head over the pooled features.

    NOTE(review): the mangled original listed `SCREAMING_SNAKE_CASE__` twice as a base
    class, which is a TypeError at class creation; the second base was presumably the
    sequence-classification loss mixin (source of `hf_compute_loss`) — confirm.
    """

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_lowerCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        """Classify images; computes a loss when `labels` is given."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 298
| 1
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class A ( unittest.TestCase ):
    """CPU smoke tests for `accelerate.debug_launcher`.

    NOTE(review): the two methods were both mangled to `a_` (the second silently
    shadowed the first) and unittest only collects methods starting with `test`,
    so neither ever ran; restored to `test_*` names.
    """

    def test_debug_launcher_script(self):
        debug_launcher(test_script.main )

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main )
| 298
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of softmax(x) along dim 1: H = log(sum e^x) - (sum x*e^x)/(sum e^x).

    Restored from a mangled version that referenced undefined names `B`/`A` and
    discarded its own intermediates; renamed back to `entropy` because the DeeBERT
    encoder/classifier in this file call it under that name.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    """BERT encoder with a "highway" (early-exit) classifier after every layer.

    During inference, when a highway classifier's prediction entropy falls below
    `early_exit_entropy[i]`, a `HighwayException` aborts the forward pass and carries
    the partial outputs to the caller. Class renamed from the placeholder `A` because
    `DeeBertModel.__init__` in this file instantiates `DeeBertEncoder`.
    """

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit: entropy is never below -1.
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set per-layer exit thresholds; a scalar is broadcast to every layer."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model's pooler weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    # Confident enough: abort remaining layers via exception.
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , BERT_START_DOCSTRING , )
class DeeBertModel(BertPreTrainedModel):
    """BERT backbone whose encoder (`DeeBertEncoder`) can exit early via highway classifiers.

    Restored from a mangled version whose duplicated parameter names were a SyntaxError
    and whose `self.` assignment targets were dropped; the base class and docstring
    constants come from this file's own `transformers.models.bert` imports.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; `heads_to_prune` maps layer index -> head indices."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    """Control-flow exception raised by `DeeBertEncoder.forward` on an early exit.

    Carries the partial outputs (`message`) and the 1-based index of the exiting
    layer (`exit_layer`). Renamed from the placeholder `A` because this file both
    raises and catches it under the name `HighwayException`; the base class is
    `Exception` so it can be raised/caught.
    """

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """One early-exit ("highway") head: BertPooler -> dropout -> linear classifier.

    Renamed from the placeholder `A` because `DeeBertEncoder.__init__` in this file
    instantiates `BertHighway`.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        """Return (logits, pooled_output) for the intermediate `encoder_outputs` tuple."""
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
    """DeeBERT sequence classifier: a `DeeBertModel` plus a final classification head,
    with per-layer highway losses for multi-layer training and optional early exit."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # The encoder exited early; its partial outputs travel in the exception.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 298
| 1
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ ):
    """Return the uppercase base16 (hex) encoding of an iterable of byte values."""
    # format(b, "02X") is equivalent to hex(b)[2:].zfill(2).upper() for values 0-255.
    return "".join(format(byte , "02X" ) for byte in snake_case__ )
def __lowerCAmelCase ( data ):
    """Decode an RFC 3548 base16 (uppercase hex) string into bytes.

    Raises ValueError for odd-length input or characters outside 0-9A-F.
    Fix: the mangled parameter name `snake_case__` did not match the body,
    which already referred to ``data`` (a NameError at runtime).
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data) , 2 ) )
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 298
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# NOTE(review): every constant below was mangled onto the SAME name `_lowerCAmelCase`,
# so each assignment overwrites the previous one, and `default_cache_path` (referenced
# a few lines down) is never defined under that name. Upstream these are distinct
# names (default_cache_path, CONFIG_NAME, WEIGHTS_NAME, ONNX_WEIGHTS_NAME, ...).
# Flagged here, not changed.
_lowerCAmelCase = HUGGINGFACE_HUB_CACHE
_lowerCAmelCase = '''config.json'''
_lowerCAmelCase = '''diffusion_pytorch_model.bin'''
_lowerCAmelCase = '''diffusion_flax_model.msgpack'''
_lowerCAmelCase = '''model.onnx'''
_lowerCAmelCase = '''diffusion_pytorch_model.safetensors'''
_lowerCAmelCase = '''weights.pb'''
_lowerCAmelCase = '''https://huggingface.co'''
_lowerCAmelCase = default_cache_path
_lowerCAmelCase = '''diffusers_modules'''
_lowerCAmelCase = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
_lowerCAmelCase = ['''fp16''', '''non-ema''']
_lowerCAmelCase = '''.self_attn'''
| 298
| 1
|
'''simple docstring'''
import math
def __lowerCAmelCase ( initial_intensity , angle ):
    """Apply Malus's law: transmitted intensity = initial_intensity * cos^2(angle).

    `angle` is in degrees, restricted to [0, 360]; `initial_intensity` must be
    non-negative. Fix: the mangled signature repeated `snake_case__` twice (a
    SyntaxError) while the body already used these parameter names.
    """
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
    # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
    # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
# Run this module's doctests (registered under the name "malus_law") when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod(name='''malus_law''')
| 298
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ) -> Dict:
__UpperCamelCase : Optional[Any] = parent
__UpperCamelCase : List[str] = 1_3
__UpperCamelCase : List[Any] = 7
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : Tuple = True
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = 9_9
__UpperCamelCase : Union[str, Any] = 3_8_4
__UpperCamelCase : str = 2
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : Any = 3_7
__UpperCamelCase : str = "gelu"
__UpperCamelCase : Optional[Any] = 0.1
__UpperCamelCase : str = 0.1
__UpperCamelCase : str = 5_1_2
__UpperCamelCase : Optional[Any] = 1_6
__UpperCamelCase : Dict = 2
__UpperCamelCase : Optional[int] = 0.02
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Optional[Any] = 4
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : Tuple = 2
__UpperCamelCase : str = 9
__UpperCamelCase : List[Any] = 1
__UpperCamelCase : Any = None
def a_ (self ) -> int:
__UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : str = None
if self.use_input_mask:
__UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : int = None
if self.use_token_type_ids:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Optional[Any] = None
if self.use_labels:
__UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : str = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : Tuple = TFConvBertModel(config=_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCamelCase : Optional[Any] = [input_ids, input_mask]
__UpperCamelCase : str = model(_UpperCAmelCase )
__UpperCamelCase : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
__UpperCamelCase : int = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = self.num_labels
__UpperCamelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__UpperCamelCase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
__UpperCamelCase : Optional[int] = self.num_choices
__UpperCamelCase : List[Any] = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__UpperCamelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase : str = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase : List[str] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__UpperCamelCase : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
__UpperCamelCase : List[str] = self.num_labels
__UpperCamelCase : Tuple = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : int = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__UpperCamelCase : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCamelCase : Any = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ (self ) -> str:
__UpperCamelCase : str = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) : Any = config_and_inputs
__UpperCamelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """Common model / pipeline tests for the TFConvBert family.

    NOTE(review): the original listed the same (mangled) mixin base twice, which is a
    duplicate-base TypeError; upstream uses two distinct mixins (a model-tester mixin and a
    pipeline-tester mixin) — confirm against this file's imports before merging.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Round-trip every model class through SavedModel and check output structure."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBert halves the attention heads via its grouped/conv attention.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attention tensors are returned, well-shaped, and toggleable via inputs or config."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released YituTech/conv-bert-base checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        # Reference values captured from the original checkpoint.
        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
# ---- end of concatenated source segment ----
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: the small Lena sample in BGR and its grayscale conversion.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
# ---- end of concatenated source segment ----
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules a model executes during one forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # A "leaf" is a module with no submodules, or a conv/batch-norm layer.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        """Run one forward pass of `x` through `self.module`, recording executed leaves."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach every hook once tracing is done.
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from `src` to `dest` by aligning their traced parametrized layers."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Trace both models with input `x` and load each src layer's weights into dest.

        Raises:
            Exception: when the two models expose different numbers of parametrized ops.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # Drop layer types the caller asked to ignore on either side.
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Copy weights from the timm checkpoint `name` into a ResNetForImageClassification.

    Verifies the converted logits match timm's, then optionally pushes model and image
    processor to the Hub under `save_directory / checkpoint_name`.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    # Sanity check: the converted model must reproduce the timm logits.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Build the ResNet configs and convert one (or all) supported timm checkpoints.

    Returns:
        (config, expected_shape): the last converted config and the expected logits shape.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Partial so every config below shares the ImageNet label mapping.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="bottleneck"
        ),
    }
    if model_name:
        # Bind `config` here too so the return below is always well-defined.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# ---- end of concatenated source segment ----
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A SafeLoader that rejects YAML mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert them to tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading `---`-delimited YAML block and the remaining body.

    Returns (yaml_block, body); yaml_block is None when the README has no front matter.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing `---`, relative to the whole list.
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    """Dataset card metadata: a dict that round-trips through a README's YAML front matter."""

    # YAML keys written with dashes but stored with underscores on this dict.
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        """Load metadata from the YAML block of the README at `path` (empty when absent)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata back into the README at `path`, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content=None) -> str:
        if readme_content is not None:
            # Replace any existing front matter, keep the body.
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into a DatasetMetadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, restoring dashed key spellings."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,  # preserve insertion order in the card
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task categories, each mapped to its (currently empty) list of sub-task tags.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    # Round-trip: parsing validates the YAML block, writing normalizes it in place.
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
# ---- end of concatenated source segment ----
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase = logging.getLogger()
def get_setup_file():
    """Return the value of the `-f` argument passed on the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    """Load and return `all_results.json` from `output_dir`.

    Raises:
        ValueError: when the results file does not exist.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    """Return True when running on CUDA and NVIDIA apex is installed (enables fp16 tests)."""
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
# Stream log records to stdout so test runners capture them.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@classmethod
def a_ (cls ) -> Union[str, Any]:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
__UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
__UpperCamelCase : List[str] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__UpperCamelCase : Optional[Any] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def a_ (cls ) -> Union[str, Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Optional[int]:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase : int = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__UpperCamelCase : int = 7 if get_gpu_count() > 1 else 2
__UpperCamelCase : int = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Any:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : str = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Dict:
__UpperCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[str] = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Union[str, Any]:
__UpperCamelCase : str = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Dict = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Dict = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "translation_no_trainer" ) ) )
@slow
def a_ (self ) -> List[Any]:
__UpperCamelCase : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
__UpperCamelCase : List[Any] = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs )
__UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def a_ (self ) -> Tuple:
__UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__UpperCamelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
__UpperCamelCase : str = get_results(_UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , "image_classification_no_trainer" ) ) )
| 298
| 1
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
if isinstance(snake_case__ , snake_case__ ):
__UpperCamelCase : int = np.full((len(snake_case__ ), sequence_length, 2) , snake_case__ )
else:
__UpperCamelCase : Optional[int] = np.full((len(snake_case__ ), sequence_length) , snake_case__ )
for i, tensor in enumerate(snake_case__ ):
if padding_side == "right":
if isinstance(snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[Any] = tensor[:sequence_length]
else:
__UpperCamelCase : Union[str, Any] = tensor[:sequence_length]
else:
if isinstance(snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = tensor[:sequence_length]
else:
__UpperCamelCase : int = tensor[:sequence_length]
return out_tensor.tolist()
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : str = ord(snake_case__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
__UpperCamelCase : Optional[int] = unicodedata.category(snake_case__ )
if cat.startswith("P" ):
return True
return False
@dataclass
class A ( DataCollatorMixin ):
    """
    Collator that pads token features via the tokenizer and additionally pads the
    entity-level fields ("ner_tags", "original_entity_spans") and labels to the
    padded entity sequence length, returning int64 tensors.
    """

    # Field names restored from the `self.*` reads in torch_call below; the
    # obfuscated source rebound a single attribute `A` repeatedly, leaving the
    # attributes the method depends on undefined.
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self , features ):
        # Renamed from the duplicated obfuscated `a_`: DataCollatorMixin
        # dispatches to `torch_call` when return_tensors == "pt".
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        # NOTE(review): `padding_tensor` is the helper defined earlier in this
        # file under an obfuscated name — confirm it is exposed under this name.
        batch["ner_tags"] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        # Bug fix: `torch.intaa` is not a real dtype (the original used int64),
        # and the comprehension must tensor-ify `v`, not an undefined placeholder.
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
| 298
|
'''simple docstring'''
from maths.prime_check import is_prime
def __lowerCAmelCase ( number ):
    """
    Return number + 2 when (number, number + 2) is a twin-prime pair, else -1.

    Raises:
        TypeError: if `number` is not an int.
    """
    # Bug fixes: the obfuscated check compared the value against itself
    # (`isinstance(x, x)`) and its sense was inverted; the parameter name is
    # restored so the f-string and arithmetic below resolve.
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class A :
    """
    Test helper that builds a tiny DecisionTransformerConfig and matching random
    trajectory tensors (states, actions, rewards, returns-to-go, timesteps, mask).
    """

    def __init__(self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ) -> Optional[Any]:
        # Bug fixes: every argument was bound to a throwaway local (never to
        # `self`), and the parameter name was duplicated seven times (a
        # SyntaxError). Names reconstructed from the attribute reads below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self ) -> List[Any]:
        # Renamed from the duplicated obfuscated `a_` so the in-file call sites
        # (`self.prepare_config_and_inputs()` etc.) resolve.
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self ) -> Any:
        return DecisionTransformerConfig(
            batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )

    def create_and_check_model(self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ) -> str:
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-test harness for DecisionTransformerModel."""

    # Bug fix: the class attributes were all rebound to a single obfuscated name
    # `A` (only the last assignment survived) and the base classes were the
    # undefined placeholder `SCREAMING_SNAKE_CASE__`. Names restored from the
    # mixin contracts and the reads below (e.g. `self.all_model_classes`) —
    # the flag names are reconstructed; TODO confirm order against upstream.
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False

    def setUp(self ) -> int:
        # Bug fix: the tester/config-tester were bound to throwaway locals
        # instead of `self.*`, so every test method raised AttributeError.
        # NOTE(review): `DecisionTransformerModelTester` is the helper class
        # defined above under an obfuscated name — confirm its exposed name.
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )

    def test_config(self ) -> Tuple:
        self.config_tester.run_common_tests()

    def test_model(self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ) -> Optional[Any]:
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_forward_signature(self ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class A ( unittest.TestCase ):
    """Integration test: autoregressive action prediction with the pretrained hopper expert."""

    @slow
    def test_autoregressive_prediction(self ) -> Any:
        """
        An integration test that performs autoregressive prediction of states,
        actions and returns and checks the predicted actions against an
        expected-output tensor.
        """
        # Bug fixes: locals were bound to throwaway obfuscated names; the
        # nonexistent dtype `torch.floataa` is restored to float32 and the
        # device/return_dict placeholders to `torch_device`/False.
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_preds , action_preds , return_preds = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_preds.shape , actions.shape )
            self.assertTrue(torch.allclose(action_preds[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            state , reward , done , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            action = action_preds[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
| 298
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
__UpperCamelCase : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
__UpperCamelCase : str = 1 - (matter_density + radiation_density + dark_energy)
__UpperCamelCase : List[Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__UpperCamelCase : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    # Bug fix: 0.3 was bound to a throwaway name while the call read
    # `matter_density`, and the callee was invoked under an undefined name; it
    # now calls the function defined above.
    matter_density = 0.3
    print(
        __lowerCAmelCase(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 298
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : List[str] = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : List[str] = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token") )
return token
def __lowerCAmelCase ( ):
__UpperCamelCase : str = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def __lowerCAmelCase ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """
    Convert an original CvT checkpoint (`cvt_file_name`) into a Hugging Face
    CvtForImageClassification model and save both the model and its image
    processor to `pytorch_dump_folder_path`.
    """
    # Bug fixes: all four parameters were obfuscated to one name (a SyntaxError)
    # and most locals were bound to throwaway names; names restored from the
    # body reads and the argparse entry point below. The CvtConfig kwargs
    # `idalabel`/`labelaid` were mangled spellings of `id2label`/`label2id`.
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    # NOTE(review): the obfuscated source lost the assignment target for
    # `image_size`; the upstream script sets the processor's shortest edge — confirm.
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    # NOTE(review): `cls_token`/`embeddings`/`attention`/`final` are the helpers
    # defined above under obfuscated names — confirm they are exposed as such.
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Bug fix: the parser and parsed args were bound to a throwaway name while
    # the code below read `parser`/`args`; the conversion entry point is invoked
    # under its in-file name.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    __lowerCAmelCase(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 298
|
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
# Bug fix: both constants were bound to the same throwaway name while later
# code reads `TRANSFORMERS_PATH` (see direct_transformers_import call below);
# the second name follows the upstream script — TODO confirm.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCamelCase : str = f.readlines()
# Find the start prompt.
__UpperCamelCase : Dict = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
__UpperCamelCase : Dict = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
# Bug fix: these module-level bindings used a throwaway obfuscated name while
# the functions below read `transformers_module`, `TASK_GUIDE_TO_MODELS` and
# `SPECIAL_TASK_GUIDE_TO_MODEL_TYPES`; the names are restored.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map of each task-guide doc file to the auto model mapping that defines which
# architectures support that task.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def __lowerCAmelCase( snake_case__ ):
    """Return a markdown line listing every model usable for the given task guide.

    Args:
        snake_case__: task-guide filename (e.g. ``"summarization.md"``), used as
            a key into ``TASK_GUIDE_TO_MODELS`` / ``SPECIAL_TASK_GUIDE_TO_MODEL_TYPES``.

    Returns:
        A ``", "``-joined string of ``[name](../model_doc/code)`` links, newline-terminated.

    NOTE(review): the original body bound every local to ``__UpperCamelCase`` and
    then read the real upstream names (NameError); the names are restored from
    those read sites.
    """
    model_mapping_names = TASK_GUIDE_TO_MODELS[snake_case__]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__, set())
    # Keep only model types that appear in either mapping, preserving the
    # (insertion) order of MODEL_NAMES_MAPPING.
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def __lowerCAmelCase( task_guide, overwrite=False ):
    """Check (and optionally rewrite) the auto-generated model list in a task guide.

    Args:
        task_guide: task-guide filename to check.
        overwrite: when True, rewrite the file in place instead of raising.

    Raises:
        ValueError: if the generated list is stale and ``overwrite`` is False.

    NOTE(review): the original signature repeated ``snake_case__`` twice (a
    SyntaxError) and locals were mangled; parameter and local names are restored
    from their read sites. ``PATH_TO_TASK_GUIDES`` is assumed to be defined
    earlier in the file — TODO confirm.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    # CLI entry point: check every task guide, optionally fixing in place.
    # NOTE(review): the original bound the parser and parsed args to
    # `_lowerCAmelCase` while reading `parser`/`args` (NameError) — restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        # NOTE(review): `check_model_list_for_task` must be the (mangled)
        # checker function defined above — confirm the name after de-mangling.
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 298
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
# Demo data for the Banker's algorithm class defined below:
# a claim vector, an allocated-resources table and a maximum-claim table.
# NOTE(review): all three constants are bound to the same name
# `_lowerCAmelCase`, so the first two assignments are immediately shadowed and
# only the last table survives — upstream presumably used three distinct
# names; confirm before relying on these values.
_lowerCAmelCase = [8, 5, 9, 7]
_lowerCAmelCase = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
_lowerCAmelCase = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class A:
    """Banker's algorithm: decide whether processes can run to completion safely.

    The class is constructed from a claim vector (total resources per type), a
    table of currently allocated resources per process, and a table of each
    process's maximum claim.

    NOTE(review): the original methods were all mangled to ``a_`` while the
    class body calls them as ``self.__need()``, ``self.__available_resources()``,
    ``self.__pretty_data()`` etc.; those call sites mandate the restored names
    below. Local variables are likewise restored from their read sites.
    """

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table) -> None:
        # NOTE(review): the original bound the ctor args to throwaway locals
        # and never set instance state, breaking every method — fixed.
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Column-wise sum of currently allocated resources."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need row."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety check, printing each executable process in turn."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 5_0 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation/claim tables and the initial free resources."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 298
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
    """Processor bundling an OwlViT image processor with a CLIP tokenizer.

    NOTE(review): identifiers in this class are mechanically mangled — the
    class attributes are all bound to ``A``, several signatures repeat the
    parameter ``_UpperCAmelCase`` (a SyntaxError in Python), and locals are
    bound to ``__UpperCamelCase`` while later lines read the real upstream
    names. The comments below describe the visible intent only; the upstream
    names must be restored before this class can run.
    """

    # ProcessorMixin-style declarations. Presumably `attributes`,
    # `image_processor_class` and `tokenizer_class` upstream; as written only
    # the last binding of `A` would survive — TODO confirm.
    A = ["image_processor", "tokenizer"]
    A = "OwlViTImageProcessor"
    A = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
        # NOTE(review): duplicate parameter names (SyntaxError); upstream took
        # (image_processor=None, tokenizer=None, **kwargs).
        __UpperCamelCase : Tuple = None
        # Back-compat shim: accept and forward the deprecated
        # `feature_extractor` kwarg as the image processor.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , _UpperCAmelCase , )
            __UpperCamelCase : str = kwargs.pop("feature_extractor" )
        __UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(_UpperCAmelCase , _UpperCAmelCase )

    def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
        # NOTE(review): duplicate parameter names (SyntaxError); upstream took
        # (text=None, query_images=None, images=None, padding="max_length",
        # return_tensors="np", **kwargs).
        # At least one of text / query_images / images must be provided.
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            # A single string (or flat list of strings) is one batch entry.
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
                __UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
            elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
                __UpperCamelCase : List[str] = []
                # Maximum number of queries across batch
                __UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(_UpperCAmelCase ) != max_num_queries:
                        __UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
                    __UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
                    encodings.append(_UpperCAmelCase )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            # Concatenate the per-sample encodings into one batch in the
            # requested tensor framework.
            if return_tensors == "np":
                __UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                __UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                __UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                __UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                __UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                __UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            __UpperCamelCase : Optional[Any] = BatchEncoding()
            __UpperCamelCase : Union[str, Any] = input_ids
            __UpperCamelCase : List[str] = attention_mask
        if query_images is not None:
            # Query images go through the image processor into `query_pixel_values`.
            __UpperCamelCase : str = BatchEncoding()
            __UpperCamelCase : Any = self.image_processor(
                _UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
            __UpperCamelCase : List[Any] = query_pixel_values
        if images is not None:
            __UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
        # Merge pixel values into the text/query encoding when both are present.
        if text is not None and images is not None:
            __UpperCamelCase : Optional[Any] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            __UpperCamelCase : Union[str, Any] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )

    # NOTE(review): the five methods below are all mangled to `a_`; only the
    # last definition would survive. Upstream names are presumably
    # post_process, post_process_object_detection,
    # post_process_image_guided_detection, batch_decode and decode.
    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # Forward to the image processor's post_process.
        return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
        # Forward to the image processor's post_process_object_detection.
        return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
        # Forward to the image processor's post_process_image_guided_detection.
        return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )

    def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a_ (self ) -> Tuple:
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
        return self.image_processor_class

    @property
    def a_ (self ) -> Union[str, Any]:
        # Deprecated alias for `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
        return self.image_processor
| 298
| 1
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A ( SCREAMING_SNAKE_CASE__ ):
    """Repo-hygiene checks over ./datasets: every ``open()`` must pass an
    explicit encoding (or a binary mode), and no ``print()`` calls may remain
    in dataset scripts.

    NOTE(review): the original mangled all four methods to ``a_`` (so only the
    last survived) while the test bodies call ``self._no_encoding_on_file_open``
    and ``self._no_print_statements`` by name; those call sites mandate the
    restored names below. Locals are restored from their read sites.
    """

    def _no_encoding_on_file_open(self, filepath):
        """Return a regex match for an `open(...)` call with none of the
        allow-listed keywords (encoding / binary modes) nearby, else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath):
        """Return the first real `print(` match (ignoring comments, strings
        and docstrings), else None."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 298
|
'''simple docstring'''
def __lowerCAmelCase( snake_case__ ):
    """Encode an iterable of byte values as an uppercase Base16 (hex) string."""
    hex_pairs = []
    for byte in snake_case__:
        # hex() yields e.g. '0x4a' -> keep '4a', left-pad to two chars, upper-case.
        hex_pairs.append(hex(byte)[2:].zfill(2).upper())
    return "".join(hex_pairs)
def __lowerCAmelCase( snake_case__ ):
    """Decode an uppercase Base16 (hex) string back into bytes.

    Raises ValueError for an odd-length string or any character outside the
    standard uppercase Base16 alphabet.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if len(snake_case__) % 2 != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not all(char in "0123456789ABCDEF" for char in snake_case__):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(snake_case__[i : i + 2], 16) for i in range(0, len(snake_case__), 2))
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 298
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase( config, base_model=False ):
    """Build the (timm key -> HF ViT key) rename table for a ViT checkpoint.

    Args:
        config: object exposing ``num_hidden_layers`` (e.g. a ViTConfig).
        base_model: when True, produce keys for a bare ``ViTModel`` (no ``vit.``
            prefix, layernorm + pooler head) instead of a classification model.

    Returns:
        List of ``(old_key, new_key)`` tuples.

    NOTE(review): the original signature repeated ``snake_case__`` (SyntaxError)
    and never bound ``rename_keys`` before appending to it — both fixed.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ])
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    return rename_keys
def __lowerCAmelCase( state_dict, config, base_model=False ):
    """Split each timm fused qkv projection into separate HF q/k/v entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.*`` and writes
    ``{prefix}encoder.layer.{i}.attention.attention.{query,key,value}.*``.

    NOTE(review): the original bound everything to ``__UpperCamelCase`` so the
    popped tensors were discarded; target key names restored from the upstream
    timm->ViT conversion script — confirm against it.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase( state_dict ):
    """Drop the classification head weights from a timm state dict in place.

    NOTE(review): the original bound the key list to ``__UpperCamelCase`` and
    popped with mangled arguments; restored so the pop is keyed and tolerant
    of already-missing keys.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def __lowerCAmelCase( dct, old, new ):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    NOTE(review): the original repeated ``snake_case__`` three times in the
    signature (SyntaxError) and never wrote the popped value back — fixed.
    """
    val = dct.pop(old)
    dct[new] = val
def __lowerCAmelCase( ):
    """Download and return the standard COCO test image (two cats on a couch).

    NOTE(review): the original discarded the URL into ``__UpperCamelCase`` and
    passed an undefined ``snake_case__`` as the request/stream argument; the
    URL and ``stream=True`` are restored.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __lowerCAmelCase( vit_name, pytorch_dump_folder_path ):
    """Convert a timm ViT/DeiT checkpoint to a HuggingFace ViT model and save it.

    Args:
        vit_name: timm model identifier (e.g. ``"vit_base_patch16_224"``); the
            suffix encodes patch size / image size, and ``"in21k"`` marks a
            headless (base) checkpoint.
        pytorch_dump_folder_path: directory to write the converted model and
            image processor into.

    NOTE(review): the original signature repeated ``snake_case__`` (SyntaxError)
    and every local was mangled; names and config attributes are reconstructed
    from their read sites and the upstream conversion script — confirm against it.
    """
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            # deit base uses the ViTConfig defaults
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to
    # `_lowerCAmelCase` while reading `parser`/`args` (NameError) — restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    # NOTE(review): `convert_vit_checkpoint` must be the (mangled) conversion
    # function defined above — confirm the name after de-mangling.
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Directories holding the Flax example scripts; put them on sys.path so the
# scripts can be imported as modules below.
# NOTE(review): the list was bound to `_lowerCAmelCase` while the next two
# statements read `SRC_DIRS` (NameError) — restored. Likewise the root logger
# below, which `logger.addHandler(...)` reads further down.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax  # NOTE(review): presumably run_t5_mlm_flax upstream; matches the call site below


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def __lowerCAmelCase( ):
    """Parse the ``-f`` argument (passed through by pytest) and return its value.

    NOTE(review): the original bound the parser and parsed args to
    ``__UpperCamelCase`` while reading ``parser``/``args`` (NameError) — restored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def __lowerCAmelCase( output_dir, split="eval" ):
    """Load ``{split}_results.json`` from *output_dir* and return the parsed dict.

    Raises:
        ValueError: if the results file does not exist.

    NOTE(review): the original signature repeated ``snake_case__`` (SyntaxError)
    and bound the path/file handle to ``__UpperCamelCase``; restored.
    """
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Mirror test logs to stdout so they are visible under pytest.
# NOTE(review): the handler was bound to `_lowerCAmelCase` while the next line
# reads `stream_handler` (NameError) — restored.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A ( SCREAMING_SNAKE_CASE__ ):
    """End-to-end smoke tests for the Flax example scripts (glue, clm,
    summarization, mlm, t5-mlm, ner, qa): each builds a tiny CLI arg list,
    patches it in, runs the script's ``main()`` and checks the metrics file.

    NOTE(review): every test method is mangled to the same name ``a_`` (so as
    written only the last definition survives — upstream presumably used
    distinct ``test_run_*`` names), and the ``patch.object(_UpperCAmelCase,
    "argv", _UpperCAmelCase)`` calls reference mangled names (upstream patched
    ``sys.argv`` with the ``testargs`` list). Locals bound to
    ``__UpperCamelCase`` are likewise read later under their real names
    (``tmp_dir``, ``result``).
    """

    def a_ (self ) -> str:
        # run_flax_glue on a tiny MRPC fixture; expect eval accuracy >= 0.75.
        __UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : List[str] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_flax_glue.main()
            __UpperCamelCase : int = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )

    @slow
    def a_ (self ) -> Tuple:
        # run_clm_flax on the sample text; expect eval perplexity < 100.
        __UpperCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Any = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_clm_flax.main()
            __UpperCamelCase : Optional[int] = get_results(_UpperCAmelCase )
            self.assertLess(result["eval_perplexity"] , 1_0_0 )

    @slow
    def a_ (self ) -> str:
        # run_summarization_flax on the xsum sample; check test-split ROUGE floors.
        __UpperCamelCase : Any = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_summarization_flax.main()
            __UpperCamelCase : Tuple = get_results(_UpperCAmelCase , split="test" )
            self.assertGreaterEqual(result["test_rouge1"] , 1_0 )
            self.assertGreaterEqual(result["test_rouge2"] , 2 )
            self.assertGreaterEqual(result["test_rougeL"] , 7 )
            self.assertGreaterEqual(result["test_rougeLsum"] , 7 )

    @slow
    def a_ (self ) -> int:
        # run_mlm_flax on the sample text; expect eval perplexity < 42.
        __UpperCamelCase : int = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : str = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_mlm_flax.main()
            __UpperCamelCase : Optional[Any] = get_results(_UpperCAmelCase )
            self.assertLess(result["eval_perplexity"] , 4_2 )

    @slow
    def a_ (self ) -> Dict:
        # run_ta_mlm_flax (t5 span-mlm) on the sample text; accuracy >= 0.42.
        __UpperCamelCase : Dict = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Tuple = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_ta_mlm_flax.main()
            __UpperCamelCase : Tuple = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )

    @slow
    def a_ (self ) -> Union[str, Any]:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        __UpperCamelCase : Union[str, Any] = 7 if get_gpu_count() > 1 else 2
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Optional[Any] = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_flax_ner.main()
            __UpperCamelCase : int = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
            self.assertGreaterEqual(result["eval_f1"] , 0.3 )

    @slow
    def a_ (self ) -> List[Any]:
        # run_qa on the SQuAD v2 sample; expect F1 and exact-match >= 30.
        __UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCamelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
        with patch.object(_UpperCAmelCase , "argv" , _UpperCAmelCase ):
            run_qa.main()
            __UpperCamelCase : List[Any] = get_results(_UpperCAmelCase )
            self.assertGreaterEqual(result["eval_f1"] , 3_0 )
            self.assertGreaterEqual(result["eval_exact"] , 3_0 )
| 298
| 1
|
'''simple docstring'''
def __lowerCAmelCase ( snake_case__ = 4_000_000 ):
__UpperCamelCase : int = []
__UpperCamelCase , __UpperCamelCase : Any = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(snake_case__ )
__UpperCamelCase , __UpperCamelCase : Optional[Any] = b, a + b
return sum(snake_case__ )
if __name__ == "__main__":
print(f'{solution() = }')
| 298
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class A :
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=9_9 , _UpperCAmelCase=1_3 , _UpperCAmelCase=1_6 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=3_2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=3_0 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=None , ) -> int:
__UpperCamelCase : List[str] = parent
__UpperCamelCase : str = batch_size
__UpperCamelCase : str = decoder_seq_length
# For common tests
__UpperCamelCase : Optional[int] = self.decoder_seq_length
__UpperCamelCase : Any = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Optional[int] = use_labels
__UpperCamelCase : Dict = vocab_size
__UpperCamelCase : Optional[int] = d_model
__UpperCamelCase : Union[str, Any] = d_model
__UpperCamelCase : int = decoder_layers
__UpperCamelCase : Dict = decoder_layers
__UpperCamelCase : str = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : Optional[Any] = decoder_attention_heads
__UpperCamelCase : List[Any] = eos_token_id
__UpperCamelCase : int = bos_token_id
__UpperCamelCase : Tuple = pad_token_id
__UpperCamelCase : Tuple = decoder_start_token_id
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : Optional[Any] = max_position_embeddings
__UpperCamelCase : int = None
__UpperCamelCase : Optional[int] = decoder_seq_length
__UpperCamelCase : Optional[int] = 2
__UpperCamelCase : Optional[int] = 1
def a_ (self ) -> List[Any]:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : int = None
if self.use_attention_mask:
__UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__UpperCamelCase : List[str] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
    """Check that cached decoding (``past_key_values``) matches full decoding.

    Runs the decoder once over ``input_ids`` to obtain a cache, then feeds a
    single new token with that cache and verifies the hidden state for that
    token equals the one produced by re-running the full, concatenated
    sequence.

    NOTE(review): the original signature repeated the same mangled parameter
    name four times (a SyntaxError); names restored from the tuple returned by
    ``prepare_config_and_inputs``. ``attention_mask`` and ``lm_labels`` are
    accepted for signature compatibility but unused, mirroring the original
    call shape (``*config_and_inputs``).
    """
    config.use_cache = True
    model = TrOCRDecoder(config=config).to(torch_device).eval()
    # Keep only two sequences and make sure no token id is 0 (avoids the
    # padding id interfering with the cache comparison).
    input_ids = input_ids[:2]
    input_ids[input_ids == 0] += 1

    # First forward pass: with cache, with config default, and without cache.
    outputs = model(input_ids, use_cache=True)
    outputs_use_cache_conf = model(input_ids)
    outputs_no_past = model(input_ids, use_cache=False)

    # With cache the output tuple carries one extra element (the cache itself).
    self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
    self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

    past_key_values = outputs["past_key_values"]

    # Create a hypothetical next token (never 0) and extend next_input_ids.
    next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

    output_from_no_past = model(next_input_ids)["last_hidden_state"]
    output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

    # Compare a random feature slice of the last position's hidden state.
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

    # Cached and uncached paths must agree up to numerical tolerance.
    assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
    """Adapt :meth:`prepare_config_and_inputs` to the common-test format.

    Returns:
        ``(config, inputs_dict)`` where ``inputs_dict`` holds ``input_ids``
        and ``attention_mask``; ``lm_labels`` is deliberately dropped.

    NOTE(review): the mangled name ``a_`` was restored following the
    ``ModelTesterMixin`` convention — confirm against the mixin before
    renaming further.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, input_ids, attention_mask, lm_labels = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
    return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the standalone TrOCR decoder.

    NOTE(review): the original listed the same mangled base class three times
    (a ``TypeError: duplicate base class`` at import time) and named every
    method ``a_`` (each definition shadowing the previous one, so only the
    last survived). Names below are restored following the standard
    Transformers test layout — confirm mixin names against the project's
    ``test_modeling_common`` / ``test_pipeline_mixin`` modules.
    """

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    # Attribute names for the two bare flags (True / False) are inferred from
    # the usual mixin configuration — TODO confirm.
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        # is_training=False: these checks run the decoder in eval mode only
        # — presumably the original keyword value; verify.
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    def test_inputs_embeds(self):
        # Intentionally disabled for this model (original body was `pass`).
        pass

    def test_save_load_fast_init_from_base(self):
        # Intentionally disabled for this model (original body was `pass`).
        pass

    def test_save_load_fast_init_to_base(self):
        # Intentionally disabled for this model (original body was `pass`).
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # Original simply returns — gradient-retention check not applicable here.
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 298
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.