code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
__lowerCamelCase : Any = int(input('''Enter number: ''').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 1 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCamelCase = """base_with_context"""
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__lowercase =weights[f"""layers_{lyr_num}"""]
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__lowercase =ly_weight['attention']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_lowerCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
__lowercase =weights[f"""layers_{lyr_num}"""]
__lowercase =ly_weight['attention']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
__lowercase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=_lowerCAmelCase )
__lowercase =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__lowercase =weights[f"""layers_{lyr_num}"""]
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
__lowercase =ly_weight['self_attention']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =ly_weight['MultiHeadDotProductAttention_0']
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowercase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__lowercase =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =checkpoints.load_tax_checkpoint(args.checkpoint_path )
__lowercase =jnp.tree_util.tree_map(onp.array , _lowerCAmelCase )
__lowercase =[
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
__lowercase =os.path.join(args.checkpoint_path , '..' , 'config.gin' )
__lowercase =inference.parse_training_gin_file(_lowerCAmelCase , _lowerCAmelCase )
__lowercase =inference.InferenceModel(args.checkpoint_path , _lowerCAmelCase )
__lowercase =DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
__lowercase =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
__lowercase =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
__lowercase =TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
__lowercase =load_notes_encoder(ta_checkpoint['target']['token_encoder'] , _lowerCAmelCase )
__lowercase =load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , _lowerCAmelCase )
__lowercase =load_decoder(ta_checkpoint['target']['decoder'] , _lowerCAmelCase )
__lowercase =OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
__lowercase =SpectrogramDiffusionPipeline(
notes_encoder=_lowerCAmelCase , continuous_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase , scheduler=_lowerCAmelCase , melgan=_lowerCAmelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
lowerCamelCase = parser.parse_args()
main(args)
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 48 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
A__ = TypeVar("""KT""")
A__ = TypeVar("""VT""")
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self , _snake_case = "root" , _snake_case = None ):
"""simple docstring"""
_lowerCAmelCase = key
_lowerCAmelCase = value
_lowerCAmelCase = []
def __repr__( self ):
"""simple docstring"""
return F'Node({self.key}: {self.value})'
@property
def snake_case ( self ):
"""simple docstring"""
return len(self.forward )
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self , _snake_case = 0.5 , _snake_case = 16 ):
"""simple docstring"""
_lowerCAmelCase = Node[KT, VT]()
_lowerCAmelCase = 0
_lowerCAmelCase = p
_lowerCAmelCase = max_level
def __str__( self ):
"""simple docstring"""
_lowerCAmelCase = list(self )
if len(_snake_case ) == 0:
return F'SkipList(level={self.level})'
_lowerCAmelCase = max((len(str(_snake_case ) ) for item in items) , default=4 )
_lowerCAmelCase = max(_snake_case , 4 ) + 4
_lowerCAmelCase = self.head
_lowerCAmelCase = []
_lowerCAmelCase = node.forward.copy()
lines.append(F'[{node.key}]'.ljust(_snake_case , """-""" ) + """* """ * len(_snake_case ) )
lines.append(""" """ * label_size + """| """ * len(_snake_case ) )
while len(node.forward ) != 0:
_lowerCAmelCase = node.forward[0]
lines.append(
F'[{node.key}]'.ljust(_snake_case , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_snake_case ) )
_lowerCAmelCase = node.forward
lines.append("""None""".ljust(_snake_case ) + """* """ * len(_snake_case ) )
return F'SkipList(level={self.level})\n' + "\n".join(_snake_case )
def __iter__( self ):
"""simple docstring"""
_lowerCAmelCase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_lowerCAmelCase = node.forward[0]
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = []
_lowerCAmelCase = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_lowerCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_snake_case )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self._locate_node(_snake_case )
if node is not None:
for i, update_node in enumerate(_snake_case ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_lowerCAmelCase = node.forward[i]
else:
_lowerCAmelCase = update_node.forward[:i]
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self._locate_node(_snake_case )
if node is not None:
_lowerCAmelCase = value
else:
_lowerCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _snake_case ):
update_vector.append(self.head )
_lowerCAmelCase = level
_lowerCAmelCase = Node(_snake_case , _snake_case )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_snake_case )
else:
_lowerCAmelCase = new_node
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self._locate_node(_snake_case )
if node is not None:
return node.value
return None
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
_lowerCAmelCase = skip_list.head
_lowerCAmelCase = {}
while node.level != 0:
_lowerCAmelCase = node.forward[0]
_lowerCAmelCase = node.value
assert len(snake_case ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
_lowerCAmelCase = skip_list.head
_lowerCAmelCase = {}
while node.level != 0:
_lowerCAmelCase = node.forward[0]
_lowerCAmelCase = node.value
if len(snake_case ) != 4:
print()
assert len(snake_case ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
assert skip_list.find("""Some key""" ) is None
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 1_42 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
def traverse_keys(snake_case ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(snake_case )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _UpperCAmelCase ( ):
"""simple docstring"""
def is_sorted(snake_case ):
return all(next_item >= item for item, next_item in zip(snake_case , lst[1:] ) )
_lowerCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(snake_case , snake_case )
assert is_sorted(list(snake_case ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(snake_case ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(snake_case ) )
def _UpperCAmelCase ( ):
"""simple docstring"""
for _ in range(1_00 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 82 |
A__ = [0, 2, 4, 6, 8]
A__ = [1, 3, 5, 7, 9]
def _UpperCAmelCase ( snake_case , snake_case , snake_case , snake_case ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_lowerCAmelCase = 0
for digit in range(10 ):
_lowerCAmelCase = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , snake_case , snake_case )
return result
_lowerCAmelCase = 0
for digita in range(10 ):
_lowerCAmelCase = digita
if (remainder + digita) % 2 == 0:
_lowerCAmelCase = ODD_DIGITS
else:
_lowerCAmelCase = EVEN_DIGITS
for digita in other_parity_digits:
_lowerCAmelCase = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , snake_case , snake_case , )
return result
def _UpperCAmelCase ( snake_case = 9 ):
"""simple docstring"""
_lowerCAmelCase = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(snake_case , 0 , [0] * length , snake_case )
return result
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase__( __UpperCamelCase: list ,__UpperCamelCase: list ):
"""simple docstring"""
if len(__UpperCamelCase ) != 2 or len(a[0] ) != 2 or len(__UpperCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def lowercase__( __UpperCamelCase: list ,__UpperCamelCase: list ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__UpperCamelCase ) )
]
def lowercase__( __UpperCamelCase: list ,__UpperCamelCase: list ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__UpperCamelCase ) )
]
def lowercase__( __UpperCamelCase: list ):
"""simple docstring"""
if len(__UpperCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
SCREAMING_SNAKE_CASE : List[str] = len(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = matrix_length // 2
SCREAMING_SNAKE_CASE : int = [[a[i][j] for j in range(__UpperCamelCase ,__UpperCamelCase )] for i in range(__UpperCamelCase )]
SCREAMING_SNAKE_CASE : Optional[Any] = [
[a[i][j] for j in range(__UpperCamelCase ,__UpperCamelCase )] for i in range(__UpperCamelCase ,__UpperCamelCase )
]
SCREAMING_SNAKE_CASE : Union[str, Any] = [[a[i][j] for j in range(__UpperCamelCase )] for i in range(__UpperCamelCase )]
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(__UpperCamelCase )] for i in range(__UpperCamelCase ,__UpperCamelCase )]
return top_left, top_right, bot_left, bot_right
def lowercase__( __UpperCamelCase: list ):
"""simple docstring"""
return len(__UpperCamelCase ), len(matrix[0] )
def lowercase__( __UpperCamelCase: list ):
"""simple docstring"""
print('\n'.join(str(__UpperCamelCase ) for line in matrix ) )
def lowercase__( __UpperCamelCase: list ,__UpperCamelCase: list ):
"""simple docstring"""
if matrix_dimensions(__UpperCamelCase ) == (2, 2):
return default_matrix_multiplication(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(__UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = split_matrix(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = actual_strassen(__UpperCamelCase ,matrix_subtraction(__UpperCamelCase ,__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : int = actual_strassen(matrix_addition(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = actual_strassen(matrix_addition(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(__UpperCamelCase ,matrix_subtraction(__UpperCamelCase ,__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(matrix_addition(__UpperCamelCase ,__UpperCamelCase ) ,matrix_addition(__UpperCamelCase ,__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = actual_strassen(matrix_subtraction(__UpperCamelCase ,__UpperCamelCase ) ,matrix_addition(__UpperCamelCase ,__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = actual_strassen(matrix_subtraction(__UpperCamelCase ,__UpperCamelCase ) ,matrix_addition(__UpperCamelCase ,__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Tuple = matrix_addition(matrix_subtraction(matrix_addition(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ) ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Dict = matrix_addition(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = matrix_addition(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = matrix_subtraction(matrix_subtraction(matrix_addition(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ) ,__UpperCamelCase )
# construct the new matrix from our 4 quadrants
SCREAMING_SNAKE_CASE : Optional[int] = []
for i in range(len(__UpperCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(__UpperCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def lowercase__( matrixa: list ,matrixb: list ):
    """Multiply ``matrixa`` by ``matrixb`` using Strassen's algorithm.

    The matrices are zero-padded up to the next power-of-two square size so the
    divide-and-conquer step always splits evenly; the padding is stripped from
    the result before returning.

    Fixes vs. the mangled original: the signature repeated one parameter name
    (a SyntaxError), every local was assigned to the same placeholder name, and
    ``math.loga`` does not exist (``math.log2`` was intended).

    Raises:
        Exception: if the inner dimensions are incompatible for multiplication.
    """
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    # NOTE(review): early return for two square inputs reproduces the original
    # control flow visible in the mangled text — confirm against upstream.
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona ,*dimensionb )
    maxim = int(math.pow(2 ,math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 ,maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] ,maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] ,maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa ,new_matrixb )
    # Removing the additional zeros
    for i in range(0 ,maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1] ,maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.  The mangled original
    # assigned both operands to the same placeholder name and called an
    # undefined `strassen`; the wrapper above is named `lowercase__`.
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(lowercase__(matrixa, matrixb))
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
    """Test helper that builds a small LayoutLMv3 config plus synthetic inputs.

    NOTE(review): local variable names in this file were mangled to
    ``SCREAMING_SNAKE_CASE`` and the ``__init__`` parameters to a repeated
    ``A`` (duplicate parameter names are invalid Python), so the assignments
    below no longer bind the names that later statements read (``parent``,
    ``bbox``, ``config`` ...).  Code is kept byte-identical; only
    comments/docstrings were added.
    """
    def __init__( self, A, A=2, A=3, A=4, A=2, A=7, A=True, A=True, A=True, A=True, A=99, A=36, A=3, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=6, A=6, A=3, A=4, A=None, A=1_000, ):
        """Store the hyper-parameters used to build test configs and inputs."""
        SCREAMING_SNAKE_CASE : str = parent
        SCREAMING_SNAKE_CASE : List[Any] = batch_size
        SCREAMING_SNAKE_CASE : Optional[int] = num_channels
        SCREAMING_SNAKE_CASE : Optional[int] = image_size
        SCREAMING_SNAKE_CASE : List[str] = patch_size
        SCREAMING_SNAKE_CASE : List[Any] = text_seq_length
        SCREAMING_SNAKE_CASE : Tuple = is_training
        SCREAMING_SNAKE_CASE : Tuple = use_input_mask
        SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids
        SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
        SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
        SCREAMING_SNAKE_CASE : Any = hidden_size
        SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
        SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE : Any = intermediate_size
        SCREAMING_SNAKE_CASE : List[Any] = hidden_act
        SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : str = max_position_embeddings
        SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
        SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
        SCREAMING_SNAKE_CASE : List[Any] = initializer_range
        SCREAMING_SNAKE_CASE : str = coordinate_size
        SCREAMING_SNAKE_CASE : Tuple = shape_size
        SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
        SCREAMING_SNAKE_CASE : int = num_choices
        SCREAMING_SNAKE_CASE : str = scope
        SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        SCREAMING_SNAKE_CASE : Union[str, Any] = text_seq_length
        SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2 + 1
        SCREAMING_SNAKE_CASE : int = self.text_seq_length + self.image_seq_length
    def UpperCamelCase_ ( self ):
        """Build a config plus random input ids, legalised bounding boxes,
        pixel values, masks and (optionally) labels for one forward pass."""
        SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
        SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
        # Ensure that bbox is legal
        # (swap coordinates where needed so that x1 <= x2 and y1 <= y2)
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE : Tuple = t
        SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE : Any = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
        SCREAMING_SNAKE_CASE : List[Any] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
        SCREAMING_SNAKE_CASE : Optional[Any] = None
        SCREAMING_SNAKE_CASE : Tuple = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
        SCREAMING_SNAKE_CASE : int = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
        """Run the base model on text+image, text-only and image-only inputs and
        verify the hidden-state shapes."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel(config=A )
        model.to(A )
        model.eval()
        # text + image
        SCREAMING_SNAKE_CASE : Optional[int] = model(A, pixel_values=A )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(
            A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A )
        SCREAMING_SNAKE_CASE : List[str] = model(A, bbox=A, pixel_values=A, token_type_ids=A )
        SCREAMING_SNAKE_CASE : Optional[int] = model(A, bbox=A, pixel_values=A )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        SCREAMING_SNAKE_CASE : List[Any] = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=A )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
    def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
        """Check the sequence-classification head returns (batch, num_labels) logits."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
        SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaForSequenceClassification(A )
        model.to(A )
        model.eval()
        SCREAMING_SNAKE_CASE : Dict = model(
            A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
        """Check the token-classification head returns (batch, text_seq_len, num_labels) logits."""
        SCREAMING_SNAKE_CASE : int = self.num_labels
        SCREAMING_SNAKE_CASE : str = LayoutLMvaForTokenClassification(config=A )
        model.to(A )
        model.eval()
        SCREAMING_SNAKE_CASE : str = model(
            A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
    def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
        """Check the question-answering head returns start/end logits of shape (batch, seq_len)."""
        SCREAMING_SNAKE_CASE : Any = LayoutLMvaForQuestionAnswering(config=A )
        model.to(A )
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[int] = model(
            A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def UpperCamelCase_ ( self ):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)
        for the common test mixins."""
        SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) , (
                SCREAMING_SNAKE_CASE
            ) ,
        ) : Any = config_and_inputs
        SCREAMING_SNAKE_CASE : Dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """LayoutLMv3 model test-suite built on the common ModelTester/PipelineTester mixins.

    NOTE(review): the two mixin base classes and several class attributes were
    mangled to repeated placeholder names, so later attribute assignments
    clobber earlier ones.  Code kept byte-identical; comments only.
    """
    # Flags consumed by the common test mixins (originally distinct names such
    # as test_pruning / test_torchscript / test_mismatched_shapes — mangled to
    # one repeated name ``A`` here).
    A : Optional[int] = False
    A : List[str] = False
    A : Union[str, Any] = False
    # All model classes exercised by the common tests.
    A : Optional[Any] = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    A : List[Any] = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def UpperCamelCase_ ( self, A, A, A, A, A ):
        """Pipeline-mixin hook: always skip (returns True unconditionally)."""
        return True
    def UpperCamelCase_ ( self ):
        """Create the model tester and the shared ConfigTester."""
        SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModelTester(self )
        SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=37 )
    def UpperCamelCase_ ( self, A, A, A=False ):
        """Adapt ``inputs_dict`` for the given model class: expand tensors for
        multiple-choice heads and add dummy labels when ``return_labels``."""
        SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(A )
        if model_class in get_values(A ):
            SCREAMING_SNAKE_CASE : Optional[int] = {
                k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
                if isinstance(A, torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(A ):
                SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=A )
            elif model_class in get_values(A ):
                SCREAMING_SNAKE_CASE : Dict = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=A )
                SCREAMING_SNAKE_CASE : Dict = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                SCREAMING_SNAKE_CASE : str = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=A )
            elif model_class in [
                *get_values(A ),
            ]:
                SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=A, )
        return inputs_dict
    def UpperCamelCase_ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def UpperCamelCase_ ( self ):
        """Forward-pass shape test for the base model."""
        SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self ):
        """Base-model test repeated for each position-embedding type."""
        SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE : List[str] = type
            self.model_tester.create_and_check_model(*A )
    def UpperCamelCase_ ( self ):
        """Sequence-classification head test."""
        SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A )
    def UpperCamelCase_ ( self ):
        """Token-classification head test."""
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A )
    def UpperCamelCase_ ( self ):
        """Question-answering head test."""
        SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A )
    @slow
    def UpperCamelCase_ ( self ):
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModel.from_pretrained(A )
            self.assertIsNotNone(A )
def lowercase__( ):
    """Load the standard COCO test-fixture image used by the slow integration test.

    Fix: the mangled original assigned the opened image to a placeholder name
    and then returned the undefined name ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class _a ( unittest.TestCase ):
    """Slow integration test: real forward pass of pretrained LayoutLMv3-base.

    NOTE(review): local names were mangled to ``SCREAMING_SNAKE_CASE`` so the
    assignments no longer bind the names later statements read (``model``,
    ``input_ids`` ...).  Code kept byte-identical; comments only.
    """
    @cached_property
    def UpperCamelCase_ ( self ):
        """Image processor with OCR disabled (words/boxes are supplied manually)."""
        return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
    @slow
    def UpperCamelCase_ ( self ):
        """Run the checkpoint on the fixture image and compare against reference logits."""
        SCREAMING_SNAKE_CASE : Any = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(A )
        SCREAMING_SNAKE_CASE : Any = self.default_image_processor
        SCREAMING_SNAKE_CASE : Any = prepare_img()
        SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).pixel_values.to(A )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2]] )
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        SCREAMING_SNAKE_CASE : Union[str, Any] = model(
            input_ids=input_ids.to(A ), bbox=bbox.to(A ), pixel_values=pixel_values.to(A ), )
        # verify the logits
        SCREAMING_SNAKE_CASE : str = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape, A )
        SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(A )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], A, atol=1E-4 ) )
| 246 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A( UpperCamelCase ):
    """Decode formats supported by the MGP-STR processor.

    NOTE(review): the three members were mangled to one repeated name
    (originally CHARACTER / BPE / WORDPIECE); code kept byte-identical.
    """
    # character-level tokenizer output
    UpperCamelCase = '''char'''
    # byte-pair-encoding (GPT-2) tokenizer output
    UpperCamelCase = '''bpe'''
    # WordPiece (BERT) tokenizer output
    UpperCamelCase = '''wp'''
# All supported decode formats, in the fixed order (char, bpe, wp).
# Annotation corrected: this is a tuple of DecodeType members, not a str.
lowerCamelCase : tuple = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A( UpperCamelCase ):
    """Processor for MGP-STR scene-text recognition.

    Wraps a ViT image processor and three tokenizers (character, BPE,
    WordPiece) and fuses the three decoding heads' predictions, keeping the
    highest-confidence string per sample.

    NOTE(review): local variables were mangled to ``lowerCamelCase_`` and all
    method names to ``a__``, so assignments no longer bind the names later
    statements read and later methods shadow earlier ones.  Code kept
    byte-identical; comments/docstrings only.
    """
    UpperCamelCase = ['''image_processor''', '''char_tokenizer''']
    UpperCamelCase = '''ViTImageProcessor'''
    UpperCamelCase = '''MgpstrTokenizer'''
    def __init__( self : str , A_ : Any=None , A_ : Optional[int]=None , **A_ : List[Any] ) -> Optional[Any]:
        """Accept an image processor and a char tokenizer; the BPE and WordPiece
        tokenizers are loaded from the gpt2 / bert-base-uncased checkpoints."""
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , A_ , )
            lowerCamelCase_ = kwargs.pop('feature_extractor' )
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        lowerCamelCase_ = tokenizer
        lowerCamelCase_ = AutoTokenizer.from_pretrained('gpt2' )
        lowerCamelCase_ = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(A_ , A_ )
    def __call__( self : List[Any] , A_ : Dict=None , A_ : Union[str, Any]=None , A_ : int=None , **A_ : Any ) -> Optional[int]:
        """Process images and/or text; returns pixel features, encodings, or
        both merged (labels attached to the image features)."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            lowerCamelCase_ = self.image_processor(A_ , return_tensors=A_ , **A_ )
        if text is not None:
            lowerCamelCase_ = self.char_tokenizer(A_ , return_tensors=A_ , **A_ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowerCamelCase_ = encodings['input_ids']
            return inputs
    def a__ ( self : int , A_ : Union[str, Any] ) -> Dict:
        """Batch-decode the (char, bpe, wp) logit triplet and keep, per sample,
        the decoding with the highest confidence score."""
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = sequences
        lowerCamelCase_ = char_preds.size(0 )
        lowerCamelCase_ , lowerCamelCase_ = self._decode_helper(A_ , 'char' )
        lowerCamelCase_ , lowerCamelCase_ = self._decode_helper(A_ , 'bpe' )
        lowerCamelCase_ , lowerCamelCase_ = self._decode_helper(A_ , 'wp' )
        lowerCamelCase_ = []
        lowerCamelCase_ = []
        for i in range(A_ ):
            lowerCamelCase_ = [char_scores[i], bpe_scores[i], wp_scores[i]]
            lowerCamelCase_ = [char_strs[i], bpe_strs[i], wp_strs[i]]
            lowerCamelCase_ = scores.index(max(A_ ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        lowerCamelCase_ = {}
        lowerCamelCase_ = final_strs
        lowerCamelCase_ = final_scores
        lowerCamelCase_ = char_strs
        lowerCamelCase_ = bpe_strs
        lowerCamelCase_ = wp_strs
        return out
    def a__ ( self : Union[str, Any] , A_ : List[str] , A_ : str ) -> Tuple:
        """Decode one head's logits: greedy top-1 per position, truncate at the
        format's EOS token, and return (strings, cumulative-probability scores)."""
        if format == DecodeType.CHARACTER:
            lowerCamelCase_ = self.char_decode
            lowerCamelCase_ = 1
            lowerCamelCase_ = '[s]'
        elif format == DecodeType.BPE:
            lowerCamelCase_ = self.bpe_decode
            lowerCamelCase_ = 2
            lowerCamelCase_ = '#'
        elif format == DecodeType.WORDPIECE:
            lowerCamelCase_ = self.wp_decode
            lowerCamelCase_ = 102
            lowerCamelCase_ = '[SEP]'
        else:
            raise ValueError(f"""Format {format} is not supported.""" )
        lowerCamelCase_ , lowerCamelCase_ = [], []
        lowerCamelCase_ = pred_logits.size(0 )
        lowerCamelCase_ = pred_logits.size(1 )
        lowerCamelCase_ , lowerCamelCase_ = pred_logits.topk(1 , dim=-1 , largest=A_ , sorted=A_ )
        lowerCamelCase_ = preds_index.view(-1 , A_ )[:, 1:]
        lowerCamelCase_ = decoder(A_ )
        lowerCamelCase_ , lowerCamelCase_ = torch.nn.functional.softmax(A_ , dim=2 ).max(dim=2 )
        lowerCamelCase_ = preds_max_prob[:, 1:]
        for index in range(A_ ):
            lowerCamelCase_ = preds_str[index].find(A_ )
            lowerCamelCase_ = preds_str[index][:pred_eos]
            lowerCamelCase_ = preds_index[index].cpu().tolist()
            lowerCamelCase_ = pred_index.index(A_ ) if eos_token in pred_index else -1
            lowerCamelCase_ = preds_max_prob[index][: pred_eos_index + 1]
            lowerCamelCase_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(A_ )
            conf_scores.append(A_ )
        return dec_strs, conf_scores
    def a__ ( self : Tuple , A_ : Any ) -> Tuple:
        """Char-tokenizer batch decode with spaces stripped."""
        lowerCamelCase_ = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(A_ )]
        return decode_strs
    def a__ ( self : List[Any] , A_ : Any ) -> Tuple:
        """BPE (gpt2) tokenizer batch decode."""
        return self.bpe_tokenizer.batch_decode(A_ )
    def a__ ( self : Tuple , A_ : str ) -> List[Any]:
        """WordPiece (bert) tokenizer batch decode with spaces stripped."""
        lowerCamelCase_ = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(A_ )]
        return decode_strs
| 204 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _SCREAMING_SNAKE_CASE ( lowercase : Union[dict, list, tuple, torch.Tensor] ):
'''simple docstring'''
lowerCamelCase_ = []
if isinstance(lowercase , lowercase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase ) )
elif isinstance(lowercase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Tuple[int, ...] ):
'''simple docstring'''
lowerCamelCase_ = []
for d in reversed(lowercase ):
idx.append(flat_idx % d )
lowerCamelCase_ = flat_idx // d
return tuple(reversed(lowercase ) )
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Optional[Sequence[bool]] = None , lowercase : Optional[Sequence[bool]] = None , ):
'''simple docstring'''
def reduce_edge_list(lowercase : List[bool] ) -> None:
lowerCamelCase_ = True
for i in range(len(lowercase ) ):
lowerCamelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
lowerCamelCase_ = l[reversed_idx]
if start_edges is None:
lowerCamelCase_ = [s == 0 for s in start]
reduce_edge_list(lowercase )
if end_edges is None:
lowerCamelCase_ = [e == (d - 1) for e, d in zip(lowercase , lowercase )]
reduce_edge_list(lowercase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase ) == 0:
return [()]
elif len(lowercase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowerCamelCase_ = []
lowerCamelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase , lowercase ):
if s == e:
path_list.append(slice(lowercase , s + 1 ) )
else:
break
lowerCamelCase_ = tuple(lowercase )
lowerCamelCase_ = len(lowercase )
# start == end, and we're done
if divergence_idx == len(lowercase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase_ = start[divergence_idx]
return tuple(
path + (slice(lowercase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase_ = end[divergence_idx]
return tuple(
path + (slice(lowercase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowerCamelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( t : torch.Tensor , flat_start : int , flat_end : int , no_batch_dims : int ):
    """Equivalent to ``t.reshape(-1, *t.shape[no_batch_dims:])[flat_start:flat_end]``
    but without flattening (avoids a copy for non-contiguous tensors).

    Fix vs. the mangled original: the signature repeated the parameter name
    ``lowercase`` four times (a SyntaxError) and locals were mangled.

    NOTE(review): ``_flat_idx_to_idx`` and ``_get_minimal_slice_set`` are the
    names used by the original upstream helpers; in this mangled file those
    helpers were renamed, so the calls below need the definitions restored
    under their real names.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive, hence flat_end - 1.
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def _SCREAMING_SNAKE_CASE ( layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , ):
    """Apply ``layer`` to ``inputs`` in chunks of ``chunk_size`` along the
    flattened batch dimensions, reassembling the outputs into one result.

    Fixes vs. the mangled original: the signature repeated the parameter name
    ``lowercase`` seven times (a SyntaxError) and every local assignment was
    mangled, leaving the names the body reads (``prepped_inputs``, ``out`` ...)
    undefined.

    NOTE(review): ``_fetch_dims`` / ``_chunk_slice`` are the upstream helper
    names; in this mangled file those helpers were renamed, so the calls below
    need the definitions restored under their real names.  ``tensor_tree_map``
    comes from the file's ``.tensor_utils`` import.
    """
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    # Broadcast all inputs up to a common set of batch dimensions.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            # Low-memory path defers flattening to the per-chunk slicer.
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast rather than sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(da: dict, db: dict) -> None:
                for k, v in da.items():
                    if isinstance(v, dict):
                        assign(v, db[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for xa, xb in zip(out, output_chunk):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')
        i += chunk_size

    # Restore the original batch dimensions on the assembled output.
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class A:
    """Binary-searches and caches the largest chunk size that runs without a
    RuntimeError (e.g. CUDA OOM), re-tuning only when the argument shapes change.

    Fixes vs. the mangled original: every method was named ``a__`` (so later
    definitions shadowed earlier ones while call sites used the real names
    ``_determine_favorable_chunk_size`` / ``_compare_arg_caches``), several
    signatures repeated the parameter name ``A_`` (a SyntaxError), and local
    assignments were mangled.
    """

    def __init__( self , A_ : int = 512 , ) -> None:
        """A_: upper bound on the chunk sizes that will be tried."""
        self.max_chunk_size = A_
        # Most recently tuned chunk size, reused while the args stay consistent.
        self.cached_chunk_size = None
        # Shape-signature of the args the cached chunk size was tuned for.
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Binary-search the largest power-of-two chunk size <= max_chunk_size
        for which ``fn(*args, chunk_size=...)`` does not raise RuntimeError."""
        logging.info('Tuning chunk size...')
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Candidate sizes: min_chunk_size, then powers of two above it; the
        # largest candidate is bumped past max_chunk_size as a sentinel.
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac_a: Iterable, ac_b: Iterable) -> bool:
        """Recursively compare two argument-shape caches for equality."""
        consistent = True
        for aa, ab in zip(ac_a, ac_b):
            assert type(aa) == type(ab)
            if isinstance(aa, (list, tuple)):
                consistent &= self._compare_arg_caches(aa, ab)
            elif isinstance(aa, dict):
                # Compare dict values in a deterministic (key-sorted) order.
                aa_items = [v for _, v in sorted(aa.items(), key=lambda x: x[0])]
                ab_items = [v for _, v in sorted(ab.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(aa_items, ab_items)
            else:
                consistent &= aa == ab
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int, ) -> int:
        """Return a chunk size for ``args``, re-tuning only when the tensor
        shapes (or other argument values) changed since the last call."""
        consistent = True
        # Replace tensors with their shapes so the cache compares cheaply.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing cached yet: force a tuning pass.
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 204 | 1 |
def snake_case (UpperCAmelCase__ ) -> bool:
    """Return True if ``UpperCAmelCase__`` is a valid dotted-quad IPv4 address.

    Fixes vs. the mangled original: it read the undefined names
    ``ip_va_address`` and ``octets``; the ``if i.isdigit()`` filter silently
    dropped empty/non-numeric parts (so "1.2..3.4" passed); and the upper
    bound was 254, rejecting the valid octet 255.
    """
    octets = UpperCAmelCase__.split('.' )
    # Exactly four parts, each a non-negative integer in [0, 255].
    return len(octets ) == 4 and all(octet.isdigit() and int(octet ) <= 255 for octet in octets )
if __name__ == "__main__":
    # Read an address from stdin and report whether it is a valid IPv4 address.
    # Fix: the mangled original assigned both locals to the same placeholder
    # name and called the undefined `is_ip_va_address_valid`; the validator
    # above is named `snake_case`.
    ip = input().strip()
    valid_or_invalid = 'valid' if snake_case(ip ) else 'invalid'
    print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def snake_case (UpperCAmelCase__ ) -> bool:
    """Return True if the undirected graph ``UpperCAmelCase__`` (adjacency list
    indexable by 0..n-1) is bipartite, using DFS 2-coloring.

    Fixes vs. the mangled original: the body read the undefined names
    ``visited``, ``color`` and ``graph``, and the return annotation claimed
    ``List[str]`` for a boolean result.
    """
    visited = [False] * len(UpperCAmelCase__ )
    color = [-1] * len(UpperCAmelCase__ )

    def dfs(v, c):
        # Color vertex v with c and give its unvisited neighbours the
        # opposite color (1 - c).
        visited[v] = True
        color[v] = c
        for u in UpperCAmelCase__[v]:
            if not visited[u]:
                dfs(u, 1 - c )

    # Color every connected component.
    for i in range(len(UpperCAmelCase__ ) ):
        if not visited[i]:
            dfs(i, 0 )
    # The graph is bipartite iff no edge connects two same-colored vertices.
    for i in range(len(UpperCAmelCase__ ) ):
        for j in UpperCAmelCase__[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
# Fix: the mangled original assigned the dict to a placeholder name and
# called the undefined `check_bipartite_dfs`; the checker above is named
# `snake_case`.
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(snake_case(graph))
'''Project Euler problem 30: numbers that equal the sum of the fifth powers of their digits.'''
# Lookup table mapping each decimal digit (as a string) to its fifth power.
# Annotation corrected: this is a dict, not Optional[int].
lowercase : dict = {str(digit): digit**5 for digit in range(10)}
# Backwards-compatible alias under the name the helper function below
# originally read before the module names were mangled.
DIGITS_FIFTH_POWER = lowercase
def lowerCAmelCase_ ( snake_case__ ):
    '''Return the sum of the fifth powers of the decimal digits of ``snake_case__``.

    Fix vs. the mangled original: it indexed the undefined global
    ``DIGITS_FIFTH_POWER``; the power is computed inline instead, making the
    function self-contained.
    '''
    return sum(int(digit) ** 5 for digit in str(snake_case__ ) )
def lowerCAmelCase_ ( snake_case__ : int = 1_000 , lowerCamelCase__ : int = 100_0000 ):
    '''Sum all numbers in ``[snake_case__, lowerCamelCase__)`` that equal the sum
    of the fifth powers of their own digits.

    The defaults reproduce Project Euler problem 30's search range, so the
    zero-argument call is backward-compatible.  Fix vs. the mangled original:
    it called the undefined name ``digits_fifth_powers_sum``; the digit sum is
    computed inline instead.
    '''
    return sum(
        number
        for number in range(snake_case__ , lowerCamelCase__ )
        if number == sum(int(digit) ** 5 for digit in str(number ) ) )
if __name__ == "__main__":
    # Fix: the mangled original called the undefined name `solution`; the
    # zero-argument search defined above is named `lowerCAmelCase_`.
    print(lowerCAmelCase_())
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
# Map of pretrained Informer checkpoint names to their config URLs.
# NOTE(review): this rebinds the same mangled name as the logger above,
# clobbering it — the original file used two distinct names.
lowerCamelCase_ : Optional[Any] = {
    'huggingface/informer-tourism-monthly': (
        'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : Tuple = """informer"""
lowercase_ : str = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case_ = None , snake_case_ = None , snake_case_ = "student_t" , snake_case_ = "nll" , snake_case_ = 1 , snake_case_ = None , snake_case_ = "mean" , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = 6_4 , snake_case_ = 3_2 , snake_case_ = 3_2 , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = True , snake_case_ = "gelu" , snake_case_ = 0.05 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 0.1 , snake_case_ = 1_0_0 , snake_case_ = 0.02 , snake_case_=True , snake_case_ = "prob" , snake_case_ = 5 , snake_case_ = True , **snake_case_ , ):
"""simple docstring"""
A_ : str = prediction_length
A_ : List[Any] = context_length or prediction_length
A_ : str = distribution_output
A_ : Dict = loss
A_ : Any = input_size
A_ : Union[str, Any] = num_time_features
A_ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A_ : List[Any] = scaling
A_ : Tuple = num_dynamic_real_features
A_ : Any = num_static_real_features
A_ : str = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
A_ : Optional[int] = cardinality
else:
A_ : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
A_ : Any = embedding_dimension
else:
A_ : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : int = num_parallel_samples
# Transformer architecture configuration
A_ : str = input_size * len(self.lags_sequence ) + self._number_of_features
A_ : List[Any] = d_model
A_ : Dict = encoder_attention_heads
A_ : Dict = decoder_attention_heads
A_ : List[Any] = encoder_ffn_dim
A_ : Union[str, Any] = decoder_ffn_dim
A_ : int = encoder_layers
A_ : Any = decoder_layers
A_ : List[Any] = dropout
A_ : str = attention_dropout
A_ : Tuple = activation_dropout
A_ : List[str] = encoder_layerdrop
A_ : List[str] = decoder_layerdrop
A_ : str = activation_function
A_ : Optional[int] = init_std
A_ : List[Any] = use_cache
# Informer
A_ : Tuple = attention_type
A_ : List[Any] = sampling_factor
A_ : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 286 | 0 |
def jaro_winkler(str_a: str, str_b: str) -> float:
    """Return the Jaro-Winkler similarity of the two strings, in [0, 1].

    Jaro similarity combines the ratio of matched characters with the number
    of transpositions; Winkler adds a bonus for a shared prefix (up to 4 chars).
    """

    def get_matched_characters(_str_a: str, _str_b: str) -> str:
        # Characters of _str_a that also occur in _str_b within the Jaro
        # matching window (half the shorter length).
        matched = []
        limit = min(len(_str_a), len(_str_b)) // 2
        for i, char in enumerate(_str_a):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_b)))
            if char in _str_b[left:right]:
                matched.append(char)
                # Blank out the consumed character so it cannot match twice.
                _str_b = f"{_str_b[0:_str_b.index(char)]} {_str_b[_str_b.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)

    # transposition: matched characters that disagree position-wise, halved
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run the module's doctests, then print one example score (~0.4667).
    import doctest
    doctest.testmod()
    print(jaro_winkler("""hello""", """world"""))
| 201 |
from __future__ import annotations
from collections import deque
class Automaton:
    """Aho-Corasick automaton for simultaneous multi-keyword string search.

    ``adlist`` stores the trie: each node is a dict with the incoming edge
    character (``value``), child node indices (``next_states``), the failure
    link (``fail_state``) and the keywords ending at this node (``output``).
    """

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        # Node 0 is the trie root.
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of ``current_state`` reached by ``char``, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert ``keyword`` into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie computing failure links and merged outputs."""
        q: deque[int] = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0  # depth-1 nodes fail to the root
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                # Follow parent's failure chain until a node with a matching
                # outgoing edge (or the root) is found.
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # Inherit the outputs reachable through the failure link.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return ``{keyword: [start indices]}`` for every occurrence in ``string``."""
        result: dict[str, list[int]] = {}  # keyword -> list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result


# Backward-compatible alias for the original (generated) class name.
lowerCAmelCase = Automaton
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 201 | 1 |
"""simple docstring"""
def __magic_name__ ( __snake_case : str ) -> int:
lowercase : Any = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowercase : str = 1
for n in range(m + 1 ):
for k in range(1 , __snake_case ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
    import sys

    # Accept the number either interactively or as the first CLI argument.
    if len(sys.argv) == 1:
        try:
            n = int(input("""Enter a number: """).strip())
            print(partition(n))
        except ValueError:
            print("""Please enter a number.""")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("""Please pass a number.""")
| 202 | """simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    """Tests for DPMSolverSinglestepScheduler: save/load round-trips, config
    sweeps, and full denoising loops checked against reference means."""

    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 2_5),)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword arguments override entries."""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf'),
            'variance_type': None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Scheduler built from ``config`` must step identically after a save/load cycle."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs above; the common test is a no-op here.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Forward kwargs must produce identical steps after a save/load cycle."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step deterministic denoising loop and return the final sample."""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        # NOTE(review): the scheduler is rebuilt unconditionally, so a passed-in
        # instance is effectively discarded (mirrors the original behavior).
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type='dpmsolver++',
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float('inf'))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type='learned_range')

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # The scheduler must preserve half precision end to end.
        assert sample.dtype == torch.float16
| 69 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger, referenced throughout main().
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Randomly cut a chunk of ``max_length`` seconds from ``wav``.

    Returns ``wav`` unchanged when it is already shorter than the requested
    chunk; otherwise a uniformly random contiguous slice of that length.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=2_0,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # Map the deprecated --freeze_feature_extractor flag onto the new one.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder`'
                'instead. Setting `freeze_feature_encoder==True`.', FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`.'
                'Only make use of `--freeze_feature_encoder`.' )
def main():
    """Fine-tune an audio-classification model; see the transformers example README."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_audio_classification', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}''')
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to train from scratch.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
            'Make sure to set `--audio_column_name` to the correct audio column - one of '
            f'''{', '.join(raw_datasets['train'].column_names)}.''')

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
            'Make sure to set `--label_column_name` to the correct text column - one of '
            f'''{', '.join(raw_datasets['train'].column_names)}.''')

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        # Randomly crop each clip, then featurize the batch.
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['array'], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        # Featurize full clips for evaluation (no random cropping).
        wavs = [audio['array'] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['train'].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task='audio-classification',
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets['eval'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets['train'] if training_args.do_train else None,
        eval_dataset=raw_datasets['eval'] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'audio-classification',
        'dataset': data_args.dataset_name,
        'tags': ['audio-classification'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main() | 359 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras reference implementations, keyed by the model-size suffix.
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}

# Per-variant architecture hyper-parameters (width/depth scaling, input
# resolution, classifier dropout, and depthwise conv layers needing padding).
CONFIG_MAP = {
    '''b0''': {
        '''hidden_dim''': 1_280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.0,
        '''image_size''': 224,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [],
    },
    '''b1''': {
        '''hidden_dim''': 1_280,
        '''width_coef''': 1.0,
        '''depth_coef''': 1.1,
        '''image_size''': 240,
        '''dropout_rate''': 0.2,
        '''dw_padding''': [16],
    },
    '''b2''': {
        '''hidden_dim''': 1_408,
        '''width_coef''': 1.1,
        '''depth_coef''': 1.2,
        '''image_size''': 260,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 8, 16],
    },
    '''b3''': {
        '''hidden_dim''': 1_536,
        '''width_coef''': 1.2,
        '''depth_coef''': 1.4,
        '''image_size''': 300,
        '''dropout_rate''': 0.3,
        '''dw_padding''': [5, 18],
    },
    '''b4''': {
        '''hidden_dim''': 1_792,
        '''width_coef''': 1.4,
        '''depth_coef''': 1.8,
        '''image_size''': 380,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [6],
    },
    '''b5''': {
        '''hidden_dim''': 2_048,
        '''width_coef''': 1.6,
        '''depth_coef''': 2.2,
        '''image_size''': 456,
        '''dropout_rate''': 0.4,
        '''dw_padding''': [13, 27],
    },
    '''b6''': {
        '''hidden_dim''': 2_304,
        '''width_coef''': 1.8,
        '''depth_coef''': 2.6,
        '''image_size''': 528,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [31],
    },
    '''b7''': {
        '''hidden_dim''': 2_560,
        '''width_coef''': 2.0,
        '''depth_coef''': 3.1,
        '''image_size''': 600,
        '''dropout_rate''': 0.5,
        '''dw_padding''': [18],
    },
}
def a_ ( __lowercase : int ) -> Optional[int]:
_snake_case = EfficientNetConfig()
_snake_case = CONFIG_MAP[model_name]['hidden_dim']
_snake_case = CONFIG_MAP[model_name]['width_coef']
_snake_case = CONFIG_MAP[model_name]['depth_coef']
_snake_case = CONFIG_MAP[model_name]['image_size']
_snake_case = CONFIG_MAP[model_name]['dropout_rate']
_snake_case = CONFIG_MAP[model_name]['dw_padding']
_snake_case = 'huggingface/label-files'
_snake_case = 'imagenet-1k-id2label.json'
_snake_case = 1_000
_snake_case = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) )
_snake_case = {int(__lowercase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def a_ ( ) -> Any:
_snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_snake_case = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
def a_ ( __lowercase : Union[str, Any] ) -> Tuple:
_snake_case = CONFIG_MAP[model_name]['image_size']
_snake_case = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , )
return preprocessor
def a_ ( __lowercase : str ) -> List[Any]:
_snake_case = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
_snake_case = sorted(set(__lowercase ) )
_snake_case = len(__lowercase )
_snake_case = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )}
_snake_case = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
_snake_case = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
_snake_case = {}
for item in rename_keys:
if item[0] in original_param_names:
_snake_case = 'efficientnet.' + item[1]
_snake_case = 'classifier.weight'
_snake_case = 'classifier.bias'
return key_mapping
def a_ ( __lowercase : Any , __lowercase : Any , __lowercase : Any ) -> Optional[Any]:
for key, value in tf_params.items():
if "normalization" in key:
continue
_snake_case = key_mapping[key]
if "_conv" in key and "kernel" in key:
_snake_case = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
_snake_case = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
_snake_case = torch.from_numpy(np.transpose(__lowercase ) )
else:
_snake_case = torch.from_numpy(__lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__lowercase )
@torch.no_grad()
def a_ ( __lowercase : List[Any] , __lowercase : Any , __lowercase : int , __lowercase : str ) -> Dict:
_snake_case = model_classes[model_name](
include_top=__lowercase , weights='imagenet' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_000 , classifier_activation='softmax' , )
_snake_case = original_model.trainable_variables
_snake_case = original_model.non_trainable_variables
_snake_case = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_snake_case = param.numpy()
_snake_case = list(tf_params.keys() )
# Load HuggingFace model
_snake_case = get_efficientnet_config(__lowercase )
_snake_case = EfficientNetForImageClassification(__lowercase ).eval()
_snake_case = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
_snake_case = rename_keys(__lowercase )
replace_params(__lowercase , __lowercase , __lowercase )
# Initialize preprocessor and preprocess input image
_snake_case = convert_image_processor(__lowercase )
_snake_case = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
_snake_case = hf_model(**__lowercase )
_snake_case = outputs.logits.detach().numpy()
# Original model inference
_snake_case = False
_snake_case = CONFIG_MAP[model_name]['image_size']
_snake_case = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_snake_case = image.img_to_array(__lowercase )
_snake_case = np.expand_dims(__lowercase , axis=0 )
_snake_case = original_model.predict(__lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowercase ):
os.mkdir(__lowercase )
# Save converted model and image processor
hf_model.save_pretrained(__lowercase )
preprocessor.save_pretrained(__lowercase )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
_snake_case = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(__lowercase )
hf_model.push_to_hub(__lowercase )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
_lowerCamelCase : List[str] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) | 130 | 0 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( UpperCamelCase__ ):
return [ord(UpperCamelCase__ ) - 9_6 for elem in plain]
def _UpperCamelCase ( UpperCamelCase__ ):
return "".join(chr(elem + 9_6 ) for elem in encoded )
def _UpperCamelCase ( ):
UpperCAmelCase__ : int = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , UpperCamelCase__ )
print("""Decoded:""" , decode(UpperCamelCase__ ) )
if __name__ == "__main__":
main() | 163 |
'''simple docstring'''
import torch
from torch import nn
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False):
super().__init__()
UpperCAmelCase__ : List[Any] = n_token
UpperCAmelCase__ : Tuple = d_embed
UpperCAmelCase__ : str = d_proj
UpperCAmelCase__ : str = cutoffs + [n_token]
UpperCAmelCase__ : List[Any] = [0] + self.cutoffs
UpperCAmelCase__ : Optional[Any] = div_val
UpperCAmelCase__ : Optional[int] = self.cutoffs[0]
UpperCAmelCase__ : Optional[int] = len(self.cutoffs) - 1
UpperCAmelCase__ : Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
UpperCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters))
UpperCAmelCase__ : int = nn.ModuleList()
UpperCAmelCase__ : List[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase)))
else:
self.out_projs.append(_lowerCamelCase)
self.out_layers.append(nn.Linear(_lowerCamelCase , _lowerCamelCase))
else:
for i in range(len(self.cutoffs)):
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase)))
self.out_layers.append(nn.Linear(_lowerCamelCase , r_idx - l_idx))
UpperCAmelCase__ : Optional[int] = keep_order
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
if proj is None:
UpperCAmelCase__ : Dict = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase__ : Optional[int] = nn.functional.linear(_lowerCamelCase , proj.t().contiguous())
UpperCAmelCase__ : List[str] = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False):
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase__ : Optional[int] = hidden[..., :-1, :].contiguous()
UpperCAmelCase__ : int = labels[..., 1:].contiguous()
UpperCAmelCase__ : List[str] = hidden.view(-1 , hidden.size(-1))
UpperCAmelCase__ : Optional[int] = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
UpperCAmelCase__ : Optional[int] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
UpperCAmelCase__ : Tuple = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
UpperCAmelCase__ : Dict = labels != -100
UpperCAmelCase__ : Tuple = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device)
UpperCAmelCase__ : List[Any] = (
-nn.functional.log_softmax(_lowerCamelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=-1)
else:
# construct weights and biases
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
UpperCAmelCase__ , UpperCAmelCase__ : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Dict = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase__ : Union[str, Any] = self.out_layers[i].weight
UpperCAmelCase__ : Any = self.out_layers[i].bias
if i == 0:
UpperCAmelCase__ : Optional[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
UpperCAmelCase__ : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_lowerCamelCase)
biases.append(_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase__ : Optional[int] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(_lowerCamelCase , dim=1)
if labels is None:
UpperCAmelCase__ : str = hidden.new_empty((head_logit.size(0), self.n_token))
else:
UpperCAmelCase__ : Optional[Any] = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device)
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[str] = [0] + self.cutoffs
for i in range(len(_lowerCamelCase) - 1):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase__ : List[str] = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase__ : str = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase__ : List[Any] = labels.index_select(0 , _lowerCamelCase) - l_idx
UpperCAmelCase__ : List[str] = head_logprob.index_select(0 , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = hidden.index_select(0 , _lowerCamelCase)
else:
UpperCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
UpperCAmelCase__ : List[Any] = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
UpperCAmelCase__ : Tuple = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase__ : int = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : str = nn.functional.log_softmax(_lowerCamelCase , dim=1)
UpperCAmelCase__ : int = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
UpperCAmelCase__ : List[str] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase__ : Tuple = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , _lowerCamelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def snake_case__ ( self , _lowerCamelCase):
if self.n_clusters == 0:
UpperCAmelCase__ : Union[str, Any] = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(_lowerCamelCase , dim=-1)
else:
# construct weights and biases
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Union[str, Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase__ : int = self.out_layers[i].weight
UpperCAmelCase__ : List[str] = self.out_layers[i].bias
if i == 0:
UpperCAmelCase__ : List[Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
UpperCAmelCase__ : Optional[int] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_lowerCamelCase)
biases.append(_lowerCamelCase)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase__ : List[Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token))
UpperCAmelCase__ : int = nn.functional.log_softmax(_lowerCamelCase , dim=1)
UpperCAmelCase__ : str = [0] + self.cutoffs
for i in range(len(_lowerCamelCase) - 1):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase__ : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase__ : Union[str, Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=1)
UpperCAmelCase__ : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase__ : Dict = logprob_i
return out | 163 | 1 |
from math import pow, sqrt
def lowerCamelCase_ ( *_a ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = len(_a ) > 0 and all(value > 0.0 for value in values )
return result
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_a , _a )
else ValueError('''Input Error: Molar mass values must greater than 0.''' )
)
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_a , _a , _a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_a , _a , _a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_a , _a , _a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_a , _a , _a )
else ValueError(
'''Input Error: Molar mass and effusion rate values must greater than 0.''' )
)
| 358 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def lowerCamelCase_ ( _a , _a ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = k_size // 2
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase__ : str = 1 / (2 * pi * sigma) * exp(-(square(_a ) + square(_a )) / (2 * square(_a )) )
return g
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase__ : Any = height - k_size + 1
lowerCAmelCase__ : Tuple = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase__ : int = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase__ : List[str] = 0
for i, j in product(range(_a ) , range(_a ) ):
lowerCAmelCase__ : Union[str, Any] = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase__ : List[Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase__ : List[Any] = gen_gaussian_kernel(_a , _a )
lowerCAmelCase__ : str = ravel(_a )
# reshape and get the dst image
lowerCAmelCase__ : int = dot(_a , _a ).reshape(_a , _a ).astype(_a )
return dst
if __name__ == "__main__":
# read original image
lowerCamelCase = imread(R'''../image_data/lena.jpg''')
# turn image in gray scale value
lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowerCamelCase = gaussian_filter(gray, 3, sigma=1)
lowerCamelCase = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 211 | 0 |
"""simple docstring"""
__A : List[Any] = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : int = frozenset(["prompt", "negative_prompt"])
__A : List[Any] = frozenset([])
__A : Optional[Any] = frozenset(["image"])
__A : Dict = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
__A : Optional[Any] = frozenset(["image"])
__A : str = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : Tuple = frozenset(["prompt", "image", "negative_prompt"])
__A : List[Any] = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
__A : str = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
__A : Optional[Any] = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Union[str, Any] = frozenset(["image", "mask_image"])
__A : Any = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
__A : Union[str, Any] = frozenset(["example_image", "image", "mask_image"])
__A : Optional[Any] = frozenset(["class_labels"])
__A : Tuple = frozenset(["class_labels"])
__A : Tuple = frozenset(["batch_size"])
__A : Union[str, Any] = frozenset([])
__A : Optional[Any] = frozenset(["batch_size"])
__A : Optional[Any] = frozenset([])
__A : Union[str, Any] = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
__A : Tuple = frozenset(["prompt", "negative_prompt"])
__A : int = frozenset(["input_tokens"])
__A : List[Any] = frozenset(["input_tokens"])
| 260 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
_UpperCAmelCase = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )]
# Reverse whole list
_UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
__A : List[str] = input("Enter numbers separated by a comma:\n").strip()
__A : List[Any] = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 260 | 1 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
SCREAMING_SNAKE_CASE : Tuple = '''path-to-your-trained-model'''
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
SCREAMING_SNAKE_CASE : Any = '''A photo of sks dog in a bucket'''
SCREAMING_SNAKE_CASE : Any = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''') | 317 |
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : list ) -> list:
"""simple docstring"""
for i in range(len(snake_case_ ) - 1 , 0 , -1 ):
_lowerCAmelCase = False
for j in range(snake_case_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j - 1], unsorted[j]
_lowerCAmelCase = True
for j in range(snake_case_ ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase = unsorted[j + 1], unsorted[j]
_lowerCAmelCase = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(''',''')]
print(F'{cocktail_shaker_sort(unsorted) = }') | 317 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = ["pixel_values"]
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = None , snake_case = True , snake_case = 1 / 2_5_5 , snake_case = True , snake_case = None , snake_case = None , snake_case = True , **snake_case , ):
'''simple docstring'''
super().__init__(**snake_case )
UpperCAmelCase : List[Any] = size if size is not None else {"shortest_edge": 2_2_4}
UpperCAmelCase : int = get_size_dict(snake_case , default_to_square=snake_case )
UpperCAmelCase : int = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
UpperCAmelCase : str = get_size_dict(snake_case , default_to_square=snake_case , param_name="crop_size" )
UpperCAmelCase : Any = do_resize
UpperCAmelCase : List[Any] = size
UpperCAmelCase : Optional[Any] = resample
UpperCAmelCase : Union[str, Any] = do_center_crop
UpperCAmelCase : Optional[int] = crop_size
UpperCAmelCase : List[Any] = do_rescale
UpperCAmelCase : Tuple = rescale_factor
UpperCAmelCase : Tuple = do_normalize
UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase : Any = do_convert_rgb
def A_ ( self , snake_case , snake_case , snake_case = PILImageResampling.BICUBIC , snake_case = None , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = get_size_dict(snake_case , default_to_square=snake_case )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(snake_case , size=size["shortest_edge"] , default_to_square=snake_case )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Tuple = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(snake_case , size=(size["height"], size["width"]) , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ):
'''simple docstring'''
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ):
'''simple docstring'''
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ):
'''simple docstring'''
UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : int = size if size is not None else self.size
UpperCAmelCase : Tuple = get_size_dict(snake_case , param_name="size" , default_to_square=snake_case )
UpperCAmelCase : Any = resample if resample is not None else self.resample
UpperCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : int = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : int = get_size_dict(snake_case , param_name="crop_size" , default_to_square=snake_case )
UpperCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase : Optional[Any] = make_list_of_images(snake_case )
if not valid_images(snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase : Tuple = [convert_to_rgb(snake_case ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase : Dict = [to_numpy_array(snake_case ) for image in images]
if do_resize:
UpperCAmelCase : str = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase : List[Any] = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
UpperCAmelCase : List[Any] = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
UpperCAmelCase : int = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
UpperCAmelCase : Any = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
UpperCAmelCase : Union[str, Any] = {"pixel_values": images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
| 311 |
"""Auto-generated protocol buffer module for ``sentencepiece_model.proto``.

Produced by the protocol buffer compiler — do not edit the serialized
payload by hand.
"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
# Default symbol database used to register the generated message classes.
# NOTE(review): protoc normally binds this to ``_sym_db``; here it is bound to
# ``a`` and never referenced again — confirm against the generated original.
a : Optional[int] = _symbol_database.Default()
a : Any = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
# NOTE(review): protoc emits ``_globals = globals()`` here, and the builder
# calls below expect ``DESCRIPTOR`` (the AddSerializedFile result) and
# ``_globals`` — neither name is actually bound in this file as written.
# Confirm against the original generated module before running.
a : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
  # Pure-Python descriptors: patch serialized options onto the descriptor.
  a : str = None
  a : Optional[Any] = B"H\003"
  # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
  # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
  # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
  # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
  # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
  # NOTE(review): the integers below are the ``_serialized_start`` /
  # ``_serialized_end`` byte offsets of each message within the serialized
  # file; protoc assigns them to per-message descriptor attributes, but here
  # every one is bound to the same throwaway name ``a``.
  a : str = 45
  a : Any = 15_81
  a : List[Any] = 15_17
  a : Union[str, Any] = 15_70
  a : Optional[Any] = 15_84
  a : List[str] = 17_93
  a : Optional[Any] = 17_95
  a : Tuple = 19_16
  a : Optional[Any] = 18_64
  a : int = 19_05
  a : Optional[Any] = 19_19
  a : Union[str, Any] = 24_29
  a : List[Any] = 22_08
  a : Dict = 24_18
  a : Optional[int] = 23_23
  a : str = 24_07
# @@protoc_insertion_point(module_scope)
| 311 | 1 |
import random
from typing import Any
def snake_case__ ( SCREAMING_SNAKE_CASE_ : list ):
    """Shuffle a list in place by repeated random pair swaps and return it.

    Args:
        SCREAMING_SNAKE_CASE_: the list to shuffle (mutated in place).

    Returns:
        The same list object, with its elements shuffled.
    """
    data = SCREAMING_SNAKE_CASE_
    for _ in range(len(data)):
        # Pick two random positions and exchange their contents.
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        # Bug fix: the original assigned the swapped pair to throwaway locals,
        # so the list was never actually modified.
        data[a], data[b] = data[b], data[a]
    return data


# Name the demo at the bottom of this file uses to refer to this function.
fisher_yates_shuffle = snake_case__
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    # Bug fix: call the shuffle defined above (the original referenced the
    # undefined name ``fisher_yates_shuffle``).
    print("FY Shuffle", snake_case__(integers), snake_case__(strings))
| 216 |
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

# Module-level logger for this tokenizer module.
logger = logging.get_logger(__name__)

# Bug fix: the constants below were all bound to one and the same name, each
# assignment silently overwriting the previous one. Distinct names restored.

# File names the tokenizer reads from / writes to a vocabulary directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

# Canonical download locations of the pretrained vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# Marker appended to the final symbol of a word in the merges vocabulary.
BPE_TOKEN_MERGES = "</w>"
# Continuation marker inserted between intra-word BPE pieces.
BPE_TOKEN_VOCAB = "@@ "
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[Any] ):
    """Return the set of adjacent symbol pairs occurring in a word.

    Args:
        SCREAMING_SNAKE_CASE_: a sequence of symbols (string or tuple of
            variable-length BPE pieces).

    Returns:
        Set of 2-tuples ``(previous_symbol, symbol)`` for every adjacent pair.
    """
    word = SCREAMING_SNAKE_CASE_
    # Bug fix: the original assigned ``pairs`` and ``prev_char`` to throwaway
    # locals and then read the undefined names.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# The tokenizer class below calls this helper as ``get_pairs``.
get_pairs = snake_case__
# Speech2Text2 has no max input length
# Bug fix: given a distinct name (it previously collided with every other
# module constant, all bound to the same identifier).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class SCREAMING_SNAKE_CASE__ (__snake_case ):
    """
    BPE tokenizer for Speech2Text2 (decoder side of s2t-wav2vec2 models).

    Loads a ``vocab.json`` encoder mapping and, optionally, a ``merges.txt``
    file of BPE merge rules. Without the merges file the tokenizer can only
    decode, not encode.

    Fixes vs. the previous version: the ``__init__`` signature repeated the
    parameter name ``a`` (a SyntaxError), every method shared one name and
    therefore overwrote its siblings, and many locals were bound to throwaway
    names while the code read undefined ones. Method names are restored to the
    ``PreTrainedTokenizer`` API (``_tokenize``, ``save_vocabulary``, ...) that
    the base class dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        """Load the vocabulary (and optional merges) from disk.

        Args:
            vocab_file: path to the ``vocab.json`` token -> id mapping.
            bos_token / pad_token / eos_token / unk_token: special tokens.
            do_lower_case: lowercase input text before tokenizing.
            merges_file: optional path to ``merges.txt``; when None the
                tokenizer is decode-only.
        """
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse mapping id -> token for decoding.
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            # Lower rank == higher merge priority.
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        """Return the full vocabulary including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single whitespace token.

        Returns the space-separated BPE pieces with ``@@ `` continuation
        markers between intra-word pieces.
        """
        # Mark the end of the word before merging.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` into BPE sub-word pieces (requires a merges file)."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id (int) back to a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join BPE pieces into a plain string, fusing ``@@ `` continuations."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` (and ``merges.txt`` if available) to a directory.

        Returns the tuple of file paths written.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            # Merges must be written in rank order so they can be re-read.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 216 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def UpperCAmelCase__ (
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict for a MaMaaa (M2M100) forward call.

    Any mask left as None is derived from the inputs: attention masks hide pad
    tokens, head masks enable every attention head.

    Fix vs. the previous version: the signature repeated one parameter name for
    every argument (a SyntaxError) and the device argument was an undefined
    name; ``torch_device`` is imported at the top of this file.
    """
    if attention_mask is None:
        # Mask out padding positions in the encoder input.
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the *encoder* mask is passed here as well, and the
        # computed decoder_attention_mask goes unused — this mirrors the
        # original code; confirm intent before changing.
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Descriptive alias used by the test classes below.
prepare_mam_aaa_inputs_dict = UpperCAmelCase__
class UpperCamelCase_ :
    """Builds a tiny MaMaaa (M2M100) configuration plus matching encoder/decoder
    inputs, and implements the shared checks (decoder past-key-values handling,
    standalone encoder/decoder round-trip) used by the unittest class below.

    NOTE(review): every parameter in the signatures below is literally named
    ``UpperCAmelCase__`` (duplicated within one signature — not valid Python),
    and the attribute assignments target one mangled throwaway name while
    reading unbound names; the original identifiers must be restored before
    this class can run.
    """
    # Hyper-parameters are deliberately tiny so tests run quickly on CPU.
    def __init__( self : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any]=1_3 , UpperCAmelCase__ : Any=7 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : List[Any]=9_9 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : int=2_0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : int=0 , ) -> str:
        __SCREAMING_SNAKE_CASE = parent
        __SCREAMING_SNAKE_CASE = batch_size
        __SCREAMING_SNAKE_CASE = seq_length
        __SCREAMING_SNAKE_CASE = is_training
        __SCREAMING_SNAKE_CASE = use_labels
        __SCREAMING_SNAKE_CASE = vocab_size
        __SCREAMING_SNAKE_CASE = hidden_size
        __SCREAMING_SNAKE_CASE = num_hidden_layers
        __SCREAMING_SNAKE_CASE = num_attention_heads
        __SCREAMING_SNAKE_CASE = intermediate_size
        __SCREAMING_SNAKE_CASE = hidden_act
        __SCREAMING_SNAKE_CASE = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE = encoder_layerdrop
        __SCREAMING_SNAKE_CASE = decoder_layerdrop
        __SCREAMING_SNAKE_CASE = max_position_embeddings
        __SCREAMING_SNAKE_CASE = eos_token_id
        __SCREAMING_SNAKE_CASE = pad_token_id
        __SCREAMING_SNAKE_CASE = bos_token_id
    # Builds (config, inputs_dict); random token ids are clamped above the pad
    # id so padding only arises where the test inserts it deliberately.
    def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __SCREAMING_SNAKE_CASE = self.eos_token_id # Eos Token
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        __SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
        __SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
        __SCREAMING_SNAKE_CASE = self.get_config()
        __SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
        return config, inputs_dict
    # Small MaMaaaConfig built from this tester's hyper-parameters.
    def UpperCAmelCase_ ( self : int ) -> Tuple:
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    # Variant used by the common test mixin.
    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        return config, inputs_dict
    # Checks that cached past-key-values reproduce the no-cache outputs when
    # new tokens are appended to the decoder input.
    def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
        __SCREAMING_SNAKE_CASE = MaMaaaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval()
        __SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
        __SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"]
        __SCREAMING_SNAKE_CASE = inputs_dict["head_mask"]
        # first forward pass
        __SCREAMING_SNAKE_CASE = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
        __SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        __SCREAMING_SNAKE_CASE = model(lowercase_ , attention_mask=lowercase_ )["last_hidden_state"]
        __SCREAMING_SNAKE_CASE = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[
            "last_hidden_state"
        ]
        # select random slice
        __SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
        __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-2 ) )
    # Checks that a saved/reloaded standalone encoder and decoder reproduce the
    # full model's hidden states.
    def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) -> Tuple:
        __SCREAMING_SNAKE_CASE = MaMaaaModel(config=lowercase_ ).to(lowercase_ ).eval()
        __SCREAMING_SNAKE_CASE = model(**lowercase_ )
        __SCREAMING_SNAKE_CASE = outputs.encoder_last_hidden_state
        __SCREAMING_SNAKE_CASE = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            __SCREAMING_SNAKE_CASE = model.get_encoder()
            encoder.save_pretrained(lowercase_ )
            __SCREAMING_SNAKE_CASE = MaMaaaEncoder.from_pretrained(lowercase_ ).to(lowercase_ )
        __SCREAMING_SNAKE_CASE = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __SCREAMING_SNAKE_CASE = model.get_decoder()
            decoder.save_pretrained(lowercase_ )
            __SCREAMING_SNAKE_CASE = MaMaaaDecoder.from_pretrained(lowercase_ ).to(lowercase_ )
        __SCREAMING_SNAKE_CASE = decoder(
            input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=lowercase_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class UpperCamelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
    """Unit tests for MaMaaa (M2M100): config validation, save/load, decoder
    past handling, standalone encoder/decoder, inputs_embeds and fp16 generate.

    NOTE(review): the class attributes below are all bound to the same name
    (each assignment overwriting the previous) and method parameters repeat
    one identifier per signature — the original names from the upstream test
    file must be restored before this class can run.
    """
    snake_case__ : List[Any] = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Union[str, Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    snake_case__ : Optional[Any] = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : int = True
    snake_case__ : List[Any] = True
    snake_case__ : Dict = False
    snake_case__ : Union[str, Any] = False
    # Pipeline tests that cannot run for this model are skipped here.
    def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ) -> Optional[int]:
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    # setUp: instantiate the model tester and the config tester.
    def UpperCAmelCase_ ( self : Any ) -> Dict:
        __SCREAMING_SNAKE_CASE = MaMaaaModelTester(self )
        __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowercase_ )
    def UpperCAmelCase_ ( self : str ) -> int:
        self.config_tester.run_common_tests()
    # save_pretrained/from_pretrained round-trip must not lose any weights.
    def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __SCREAMING_SNAKE_CASE = model_class(lowercase_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowercase_ )
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_class.from_pretrained(lowercase_ , output_loading_info=lowercase_ )
            self.assertEqual(info["missing_keys"] , [] )
    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase_ )
    def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowercase_ )
    # Forward pass must also work when token embeddings are passed directly
    # via inputs_embeds instead of input_ids.
    def UpperCAmelCase_ ( self : int ) -> int:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            __SCREAMING_SNAKE_CASE = model_class(lowercase_ )
            model.to(lowercase_ )
            model.eval()
            __SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(lowercase_ , lowercase_ ) )
            if not self.is_encoder_decoder:
                __SCREAMING_SNAKE_CASE = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                __SCREAMING_SNAKE_CASE = inputs["input_ids"]
                __SCREAMING_SNAKE_CASE = inputs.get("decoder_input_ids" , lowercase_ )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , lowercase_ )
            __SCREAMING_SNAKE_CASE = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                __SCREAMING_SNAKE_CASE = wte(lowercase_ )
            else:
                __SCREAMING_SNAKE_CASE = wte(lowercase_ )
                __SCREAMING_SNAKE_CASE = wte(lowercase_ )
            with torch.no_grad():
                model(**lowercase_ )[0]
    # Half-precision generate smoke test (only meaningful on CUDA).
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE = input_dict["input_ids"]
        __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowercase_ )
        __SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(lowercase_ ).eval().to(lowercase_ )
        if torch_device == "cuda":
            model.half()
        model.generate(lowercase_ , attention_mask=lowercase_ )
        model.generate(num_beams=4 , do_sample=lowercase_ , early_stopping=lowercase_ , num_return_sequences=3 )
def UpperCAmelCase__ (lowerCAmelCase_ ):
    """Create a LongTensor on the test device from a (nested) list of token ids.

    Fix vs. the previous version: the body read an undefined name for both the
    data and the device; ``torch_device`` is imported at the top of this file.
    """
    return torch.tensor(lowerCAmelCase_, dtype=torch.long, device=torch_device)


# Descriptive alias used by the integration tests below.
_long_tensor = UpperCAmelCase__

# Absolute tolerance when comparing model outputs against reference values
# (annotation fixed: the value is a float, not a Tuple).
a__ : float = 1E-4
TOLERANCE = a__
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class UpperCamelCase_ ( unittest.TestCase):
    """Slow integration tests: run the pretrained facebook/m2m100_418M
    checkpoint and compare hidden states / logits / translations against
    recorded reference values.

    NOTE(review): the ``lowercase_`` arguments sprinkled through the bodies
    are unbound names left by obfuscation — restore the original locals
    (``torch_device``, ``input_ids``, ``expected_slice``, ...) before running.
    """
    @cached_property
    def UpperCAmelCase_ ( self : Any ) -> Any:
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
    # Base model: check a 3x3 slice of the last hidden state.
    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        __SCREAMING_SNAKE_CASE = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ )
        __SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        __SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        __SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , lowercase_ , lowercase_ )
        with torch.no_grad():
            __SCREAMING_SNAKE_CASE = model(**lowercase_ )[0]
        __SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 1_0_2_4) )
        self.assertEqual(output.shape , lowercase_ )
        # change to expected output here
        __SCREAMING_SNAKE_CASE = torch.tensor(
            [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=lowercase_ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=lowercase_ ) )
    # LM head: check a 3x3 slice of the output logits.
    def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
        __SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ )
        # change to intended input
        __SCREAMING_SNAKE_CASE = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        __SCREAMING_SNAKE_CASE = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        __SCREAMING_SNAKE_CASE = prepare_mam_aaa_inputs_dict(model.config , lowercase_ , lowercase_ )
        with torch.no_grad():
            __SCREAMING_SNAKE_CASE = model(**lowercase_ )[0]
        __SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, model.config.vocab_size) )
        self.assertEqual(output.shape , lowercase_ )
        # change to expected output here
        __SCREAMING_SNAKE_CASE = torch.tensor(
            [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=lowercase_ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=lowercase_ ) )
    # End-to-end beam-search translation fr -> en against reference strings.
    def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
        __SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase_ )
        __SCREAMING_SNAKE_CASE = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
        __SCREAMING_SNAKE_CASE = [
            "L\'affaire NSA souligne l\'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        __SCREAMING_SNAKE_CASE = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
        __SCREAMING_SNAKE_CASE = model.generate(
            input_ids=dct["input_ids"].to(lowercase_ ) , attention_mask=dct["attention_mask"].to(lowercase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
        __SCREAMING_SNAKE_CASE = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=lowercase_ , skip_special_tokens=lowercase_ )
        assert generated == expected_en
| 54 |
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ = 0
A__ = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
A__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A__ = left
A__ = point
elif point > right:
A__ = right
A__ = point
else:
if item < current_item:
A__ = point - 1
else:
A__ = point + 1
return None
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
'''simple docstring'''
if collection != sorted(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
    import sys

    # Demo / smoke test of interpolation search.
    # Bug fix: ``collection`` was previously only assigned inside a dead
    # debug branch, so the search below always crashed with NameError.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
| 7 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 |
"""simple docstring"""
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[Any] , snake_case_ : Any ) -> List[str]:
'''simple docstring'''
A__ = data
A__ = None
def __repr__( self : Optional[int] ) -> str:
'''simple docstring'''
return F"""Node({self.data})"""
class LinkedList:
    """Singly linked list with indexing, positional insert/delete, iteration over
    stored values and in-place reversal.

    BUG FIX: the scrambled original wrote every result to a throwaway name
    (``A__``), so no state was ever stored, and two methods repeated a parameter
    name (a SyntaxError); real targets and parameter names are restored.
    """

    def __init__(self):
        self.head = None  # first Node, or None when the list is empty

    def __iter__(self):
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes in the list."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Values joined by '->'."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        """Return the data stored at 0-based position ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data) -> None:
        """Replace the data stored at position ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data) -> None:
        """Insert ``data`` before position ``index`` (0 <= index <= len)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        """Remove and return the first value."""
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        """Remove and return the last value."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove and return the value at position ``index``."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list has no nodes."""
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # prev now points at the old tail: make it the new head.
        self.head = prev


# Backward-compatible alias for the old (machine-scrambled) class name.
UpperCAmelCase_ = LinkedList
def test_singly_linked_list() -> None:
    """Exercise the integer API of LinkedList: inserts, deletes, indexing, reverse.

    BUG FIX: the scrambled original referenced the undefined name ``lowercase_``
    everywhere the list (or loop index) was meant.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


# Backward-compatible alias for the old (machine-scrambled) name.
_SCREAMING_SNAKE_CASE = test_singly_linked_list
def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous payloads (ints, strings, floats,
    Node instances, None).

    BUG FIX: the scrambled original referenced the undefined name ``lowercase_``
    for the list, the loop variable and delete results.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


# Backward-compatible alias for the old (machine-scrambled) name.
_SCREAMING_SNAKE_CASE = test_singly_linked_list_2
def main() -> None:
    """Interactive stdin-driven demo of LinkedList; runs the module doctests first.

    BUG FIX: the scrambled original printed/updated the undefined name
    ``lowercase_`` and the __main__ guard called the undefined name ``main``.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


# Backward-compatible alias for the old (machine-scrambled) name.
_SCREAMING_SNAKE_CASE = main

if __name__ == "__main__":
    main()
| 230 | 0 |
import argparse
from collections import defaultdict
import yaml
# Path of the documentation table of contents this script checks and fixes.
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
# Backward-compatible alias for the old (machine-scrambled) name.
A__ = PATH_TO_TOC
def clean_doc_toc(doc_list):
    """Clean one TOC section: merge duplicate entries (same ``local``), sort
    alphabetically by title, and keep the single "Overview" page (if any) first.

    Raises ValueError when a duplicated ``local`` has conflicting titles or when
    more than one overview page is present.
    BUG FIX: the scrambled original built ``defaultdict(doc_list)`` (the list as
    a default factory), used an undefined lambda variable in the sort key, and
    its dedup filter tested ``"local" not in counts`` — always true for real
    entries, so duplicates were re-added.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.'
            )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in doc or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.')
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


# Backward-compatible alias for the old (machine-scrambled) name.
a = clean_doc_toc
def check_scheduler_doc(overwrite=False):
    """Verify (and, with ``overwrite=True``, fix in place) the ordering of the
    "Schedulers" section of the documentation TOC.

    BUG FIX: the scrambled original opened its boolean parameter as a file path
    and reused one name for every local; the TOC path constant is used instead.
    """
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']

    # Clean (dedupe + sort) and compare against what is on disk.
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )


# Backward-compatible alias for the old (machine-scrambled) name.
a = check_scheduler_doc
def check_pipeline_doc(overwrite=False):
    """Verify (and, with ``overwrite=True``, fix in place) the ordering of the
    "Pipelines" section of the documentation TOC, including nested sub-sections.

    BUG FIX: the scrambled original opened its boolean parameter as a file path
    and reused one name for every local; the TOC path constant is used instead.
    """
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )


# Backward-compatible alias for the old (machine-scrambled) name.
a = check_pipeline_doc
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
A__ : Optional[int] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 207 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer subclass for extractive QA: post-processes raw start/end logits into
    text answers (via ``post_process_function``) before computing metrics.

    BUG FIX: the scrambled original inherited from the undefined name ``A__``
    (``Trainer`` is imported above and never used), gave both public methods the
    same name, repeated parameter names (a SyntaxError), and never stored any
    state on ``self``. Intended names and targets are restored.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run the evaluation loop, then post-process predictions into answers
        and compute metrics (main process only)."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction on a test set and post-process into a PredictionOutput."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)


# Backward-compatible alias for the old (machine-scrambled) class name.
_UpperCAmelCase = QuestionAnsweringTrainer
| 207 | 1 |
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
_a : Union[str, Any] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_a : List[Any] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_a : int = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int(char_a == """1""" and char_b == """1""" ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x).

    BUG FIX: both functions carried the same scrambled name, so the internal
    call to ``sigmoid`` could never resolve; distinct names are restored.
    """
    return vector * sigmoid(1.702 * vector)


# Backward-compatible alias for the old (machine-scrambled) name
# (bound to the second function, matching the original shadowing).
lowerCAmelCase__ = gaussian_error_linear_unit

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 324 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = '▁'

# BUG FIX: the scrambled original bound every constant below to the single name
# `__snake_case`, while the tokenizer class body reads the conventional names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...); those names are restored.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}

RESOURCE_FILES_NAMES = {
    'sentencepiece_model_file': 'sentencepiece.bpe.model',
    'vocab_file': 'vocab.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
        'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
    },
    'sentencepiece_model_file': {
        'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
        'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ernie-m-base': 514,
    'ernie-m-large': 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    'ernie-m-base': {'do_lower_case': False},
    'ernie-m-large': {'do_lower_case': False},
}
class lowerCamelCase ( lowerCAmelCase__ ):
    """ERNIE-M style tokenizer backed by a SentencePiece model plus a plain-text vocab.

    NOTE(review): this block looks machine-scrambled. The base-class name
    `lowerCAmelCase__` is never defined in this file (given the imports above it is
    presumably `PreTrainedTokenizer` — confirm), many assignments write to a
    throwaway name `A__` instead of `self.*`/locals, and several signatures repeat
    one parameter name (a SyntaxError). Code is left byte-identical; only
    documentation was added.
    """

    # NOTE(review): six class attributes all bound to the same name, so only the
    # last assignment survives; presumably these were model_input_names,
    # vocab_files_names, pretrained_init_configuration, max_model_input_sizes,
    # pretrained_vocab_files_map and resource_files_names — confirm upstream.
    __snake_case = ["input_ids"]
    __snake_case = VOCAB_FILES_NAMES
    __snake_case = PRETRAINED_INIT_CONFIGURATION
    __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __snake_case = PRETRAINED_VOCAB_FILES_MAP
    __snake_case = RESOURCE_FILES_NAMES

    # NOTE(review): every parameter is named `lowerCAmelCase_` (duplicate argument
    # names); the defaults suggest the original order was (vocab_file,
    # sentencepiece_model_ckpt, do_lower_case, encoding, unk/sep/pad/cls/mask
    # tokens, sp_model_kwargs) — confirm.
    def __init__( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[str]="utf8" , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Optional[Any]="[SEP]" , lowerCAmelCase_ : Any="[PAD]" , lowerCAmelCase_ : Tuple="[CLS]" , lowerCAmelCase_ : Any="[MASK]" , lowerCAmelCase_ : List[Any] = None , **lowerCAmelCase_ : int , ) -> None:
        """Load the SentencePiece model and (optionally) an explicit vocab file."""
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        A__ : Optional[Any] ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        A__ : str =do_lower_case
        A__ : Any =sentencepiece_model_ckpt
        A__ : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(UpperCamelCase__ )

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            A__ : str =self.load_vocab(filepath=UpperCamelCase__ )
        else:
            A__ : Optional[Any] ={self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
        A__ : str ={v: k for k, v in self.vocab.items()}

    def lowercase__ ( self : Tuple , lowerCAmelCase_ : Optional[int] ) -> List[str]:
        """Return (start, end) character offsets for each token of the input text."""
        if text is None:
            return None

        A__ : Optional[Any] =self.tokenize(UpperCamelCase__ )
        # NOTE(review): likely unpacked into (normalized_text, char_mapping) originally — confirm.
        A__ : List[str] ="", []
        for i, ch in enumerate(UpperCamelCase__ ):
            if ch in self.SP_CHAR_MAPPING:
                A__ : List[Any] =self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
            else:
                A__ : Any =unicodedata.normalize("""NFKC""" , UpperCamelCase__ )
            if self.is_whitespace(UpperCamelCase__ ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(UpperCamelCase__ ) )

        # NOTE(review): likely (text, token_mapping, offset) originally — confirm.
        A__ : Dict =normalized_text, [], 0
        if self.do_lower_case:
            A__ : Optional[int] =text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                A__ : Optional[Any] =token[1:]
            A__ : Dict =text[offset:].index(UpperCamelCase__ ) + offset
            A__ : Union[str, Any] =start + len(UpperCamelCase__ )

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            A__ : str =end
        return token_mapping

    @property
    def lowercase__ ( self : List[str] ) -> Any:
        """Size of the vocabulary."""
        return len(self.vocab )

    def lowercase__ ( self : str ) -> List[str]:
        """Return the full vocab (including added tokens) as a dict."""
        return dict(self.vocab , **self.added_tokens_encoder )

    def __getstate__( self : List[str] ) -> Optional[Any]:
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        A__ : Union[str, Any] =self.__dict__.copy()
        A__ : Dict =None
        # NOTE(review): `state` is never defined here — presumably scrambled from
        # `state = self.__dict__.copy()` / `state["sp_model"] = None` — confirm.
        return state

    def __setstate__( self : List[str] , lowerCAmelCase_ : List[str] ) -> Optional[Any]:
        """Restore pickled state and rebuild the SentencePiece processor."""
        A__ : Dict =d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            A__ : List[Any] ={}
        A__ : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )

    def lowercase__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Dict:
        """Map special characters of the text through SP_CHAR_MAPPING."""
        return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )

    def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=64 , lowerCAmelCase_ : Optional[int]=0.1 ) -> int:
        """Tokenize with SentencePiece, re-splitting pieces at CJK characters,
        punctuation and digit/non-digit boundaries."""
        if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
            A__ : Tuple =True
        if self.sp_model_kwargs.get("""alpha""" ) is not None:
            A__ : int =self.sp_model_kwargs.get("""alpha""" )
        if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
            A__ : str =self.sp_model_kwargs.get("""nbest_size""" )

        if not enable_sampling:
            A__ : Dict =self.sp_model.EncodeAsPieces(UpperCamelCase__ )
        else:
            A__ : Optional[int] =self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : Tuple =[]
        for pi, piece in enumerate(UpperCamelCase__ ):
            if piece == SPIECE_UNDERLINE:
                # A bare underline piece is merged into the next piece (unless first).
                if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
                    new_pieces.append(UpperCamelCase__ )
                    continue
                else:
                    continue
            A__ : Dict =0
            for i, chunk in enumerate(UpperCamelCase__ ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
                    # Split before a CJK char or punctuation mark.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(UpperCamelCase__ )
                    A__ : List[Any] =i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    # Split at a letter->digit boundary.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    A__ : Any =i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    # Split at a digit->letter boundary.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    A__ : Optional[Any] =i
            if len(UpperCamelCase__ ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces

    def lowercase__ ( self : Dict , lowerCAmelCase_ : List[str] ) -> Dict:
        """Join pieces back into a string, stripping the SentencePiece underline."""
        A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
        return out_string

    def lowercase__ ( self : Any , lowerCAmelCase_ : Optional[Any] ) -> Any:
        """Decode a list of ids back to a plain string."""
        A__ : Union[str, Any] =self.convert_ids_to_tokens(UpperCamelCase__ )
        A__ : Tuple ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
        return out_string

    def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Tuple ) -> Tuple:
        """Token -> id, falling back to the unk token id."""
        return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )

    def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Tuple ) -> int:
        """Id -> token, falling back to the unk token."""
        return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )

    def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]=None ) -> Tuple:
        """[CLS] A [SEP], or for pairs the ERNIE-M layout [CLS] A [SEP] [SEP] B [SEP]."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A__ : List[str] =[self.cls_token_id]
        A__ : Optional[Any] =[self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep

    def lowercase__ ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any]=None ) -> Union[str, Any]:
        """Pad offset mappings with (0, 0) entries in place of the special tokens."""
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]

    def lowercase__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : List[Any]=False ) -> str:
        """Mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a is not None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1]

    def lowercase__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] = None ) -> List[int]:
        """Token type ids: 0 for the first segment, 1 for the second."""
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(UpperCamelCase__ ) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)

    def lowercase__ ( self : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
        """True for CJK unified ideographs (U+4E00..U+9FFF)."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def lowercase__ ( self : str , lowerCAmelCase_ : str ) -> Optional[Any]:
        """True for ASCII letters."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def lowercase__ ( self : Any , lowerCAmelCase_ : Optional[int] ) -> Any:
        """True for common ASCII/CJK punctuation characters."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : str ) -> int:
        """True for spaces, tabs, newlines, carriage returns and Unicode Zs chars."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(UpperCamelCase__ ) == 1:
            A__ : str =unicodedata.category(UpperCamelCase__ )
            if cat == "Zs":
                return True
        return False

    def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
        """Read a token-per-line vocab file into a token -> line-index dict."""
        A__ : Any ={}
        with io.open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
            for index, line in enumerate(UpperCamelCase__ ):
                A__ : Union[str, Any] =line.rstrip("""\n""" )
                A__ : Union[str, Any] =int(UpperCamelCase__ )
        return token_to_idx

    def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] = None ) -> Tuple[str]:
        """Write the vocab file and the serialized SentencePiece model to disk."""
        A__ : Union[str, Any] =0
        if os.path.isdir(UpperCamelCase__ ):
            A__ : Optional[Any] =os.path.join(
                UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            A__ : str =(filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda lowerCAmelCase_ : kv[1] ):
                # NOTE(review): the lambda reads the undefined name `kv` — presumably
                # scrambled from `lambda kv: kv[1]` — confirm.
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        """ Please check that the vocabulary is not corrupted!""" )
                    A__ : List[Any] =token_index
                writer.write(token + """\n""" )
                index += 1
        A__ : Union[str, Any] =os.path.join(UpperCamelCase__ , """sentencepiece.bpe.model""" )
        with open(UpperCamelCase__ , """wb""" ) as fi:
            A__ : Any =self.sp_model.serialized_model_proto()
            fi.write(UpperCamelCase__ )
        return (vocab_file,)
| 134 |
def A(_SCREAMING_SNAKE_CASE: int = 1_000_000) -> int:
    """Project Euler 14: return the start value below the limit that produces the
    longest Collatz chain.

    Chain lengths are memoized in ``counters`` so each number's tail is walked
    only once.
    BUG FIX: the scrambled original wrote the memo entry and the running maximum
    into a throwaway local and returned an undefined name.
    """
    largest_number = 1
    pre_counter = 1  # length of the longest chain found so far
    counters = {1: 1}  # start value -> chain length (memo table)

    for start in range(2, _SCREAMING_SNAKE_CASE):
        counter = 0
        number = start
        while True:
            if number in counters:
                # Reached a value whose chain length is already known.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number


# Alias matching the name the __main__ guard was written against.
solution = A
if __name__ == "__main__":
print(solution(int(input().strip())))
| 48 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq checkpoint key -> HF UniSpeech module path ("*" is replaced by the layer index).
# BUG FIX: the scrambled original bound the logger, this mapping and the
# top-level key list all to one name; the functions below read the real names.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}

# Keys that live at the top level of the HF model (no "unispeech." prefix).
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]

# Backward-compatible alias for the old (machine-scrambled) name
# (bound to the last assignment, matching the original shadowing).
UpperCAmelCase__ = TOP_LEVEL_KEYS
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk the dot-separated ``key`` into ``hf_pointer`` and copy ``value`` into
    the addressed tensor (weight / weight_g / weight_v / bias, or the module
    itself when ``weight_type`` is None).

    BUG FIX: the scrambled original declared six parameters with the same name
    (a SyntaxError) and discarded the getattr/assignment results; the in-file
    caller already uses the name ``set_recursively``, restored here.
    """
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")


# Backward-compatible alias for the old (machine-scrambled) name.
_a = set_recursively
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of a fairseq UniSpeech checkpoint into the HF model,
    logging any checkpoint keys that could not be mapped.

    BUG FIX: the scrambled original repeated its parameter name (a SyntaxError)
    and discarded every intermediate result; real names/targets restored.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")


# Backward-compatible alias for the old (machine-scrambled) name.
_a = recursively_load_weights
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` contains ``conv_layers.<layer_id>.<type_id>...``; type 0 is the
    convolution itself, type 2 a (group/layer) norm. Any other tensor is recorded
    in ``unused_weights``.

    NOTE(review): reconstructed — the obfuscated original declared five
    parameters with one shared name (a SyntaxError); names are recovered from
    the body references and the call in ``recursively_load_weights``. The two
    norm assert messages also referenced ``feature_extractor[layer_id]`` instead
    of ``feature_extractor.conv_layers[layer_id]``; fixed.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeech checkpoint into the Hugging Face format.

    For fine-tuned (CTC) checkpoints this also writes the vocabulary, tokenizer
    and feature-extractor files needed by a processor.

    NOTE(review): reconstructed — the obfuscated original bound every local to a
    single name; the function name and parameter order are recovered from the
    call in the ``__main__`` block below, the locals from their later uses.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
# CLI entry point: parse conversion arguments and run the checkpoint conversion.
if __name__ == "__main__":
    # NOTE(review): the two `UpperCAmelCase__ = ...` bindings look like corrupted
    # targets for `parser` and `args`, which the lines below read — confirm and
    # restore before running.
    UpperCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 360 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( lowercase ):
    r"""Processor wrapping a CLIP image processor and a CLIP tokenizer into a
    single object that can prepare text, images, or both for the model.

    NOTE(review): reconstructed — the obfuscated original bound all three class
    attributes to one name and reused a single identifier for every parameter
    and local (a SyntaxError); attribute/method names below follow the
    ``ProcessorMixin`` contract. The images-only branch of ``__call__`` also
    wrapped the call kwargs instead of the computed image features; fixed.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only a deprecated alias for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a ``BatchEncoding`` with the tokenizer outputs and/or a
        ``pixel_values`` entry, depending on which inputs were provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Order-preserving union of tokenizer and image-processor input names.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 26 | 0 |
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : int ) -> str:
_UpperCAmelCase : list[list[str]] = [[] for _ in range(_lowerCAmelCase )]
_UpperCAmelCase : str = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(_lowerCAmelCase ) <= key:
return input_string
for position, character in enumerate(_lowerCAmelCase ):
_UpperCAmelCase : Any = position % (lowest * 2) # puts it in bounds
_UpperCAmelCase : Any = min(_lowerCAmelCase, lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = ["""""".join(_lowerCAmelCase ) for row in temp_grid]
_UpperCAmelCase : Optional[Any] = """""".join(_lowerCAmelCase )
return output_string
def decrypt(input_string: str, key: int) -> str:
    """Invert the rail-fence cipher: rebuild the zig-zag template, slice the
    ciphertext into the rails, then read the grid back off diagonally.

    NOTE(review): reconstructed — the obfuscated original bound every local to
    one identifier; ``decrypt`` is the name the brute-force helper below calls.
    """
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    lowest = key - 1  # index of the bottom rail
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    grid = []
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)
        num = min(num, lowest * 2 - num)
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt ``input_string`` with every key from 1 to ``len(input_string) - 1``
    and return a mapping of ``key -> candidate plaintext``.

    NOTE(review): relies on a module-level ``decrypt``; in the obfuscated
    original all three cipher functions shared one name, so confirm the
    decrypt function above is actually bound to ``decrypt``.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the number of 1-digits in the smallest repunit (1, 11, 111, ...)
    divisible by ``divisor``, or 0 when no repunit can be (``divisor`` even or
    a multiple of 5 shares a factor with 10).

    Works modulo ``divisor`` so the (potentially huge) repunit is never
    materialized.
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # current repunit, reduced modulo divisor
    repunit_index = 1  # number of 1-digits in the current repunit
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1000000) -> int:
    """Project Euler 129: smallest divisor (coprime to 10) whose least
    divisible repunit has more than ``limit`` digits.

    Starts just below ``limit`` because A(n) <= n, and steps by 2 since only
    odd divisors can divide a repunit.

    NOTE(review): depends on module-level ``least_divisible_repunit``; in the
    obfuscated original both functions shared one name — confirm the binding.
    """
    divisor = limit - 1
    if divisor % 2 == 0:  # ensure we only probe odd candidates
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 246 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger for this file (obfuscated binding name).
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( _snake_case ):
    r"""Image processor applying a size-divisible resize plus 1/255 rescale and
    returning a ``BatchFeature`` of ``pixel_values``.

    NOTE(review): reconstructed — the obfuscated original gave all four methods
    one shared name (so only the last binding survived) and bound attribute
    writes to a throwaway local; method names are recovered from the
    ``self.resize`` / ``self.rescale`` calls in ``preprocess`` and the
    ``BaseImageProcessor`` contract.
    """

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        """Resize ``image`` down to the closest multiple of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level transform, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        """Multiply ``image`` by ``scale`` (e.g. 1/255 to map uint8 into [0, 1])."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch, falling back to instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 363 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both bindings below share one (obfuscated) name, so the archive
# map overwrites the logger — likely corrupted names; confirm and split.
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
# Map of released YOLOS checkpoints to their hosted config files.
_lowerCAmelCase : List[Any] = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __magic_name__ ( lowerCAmelCase_ ):
    r"""Configuration for a YOLOS (ViT-based object detection) model.

    NOTE(review): reconstructed — the obfuscated original declared every
    ``__init__`` parameter with a single shared name (a SyntaxError) and bound
    each attribute to a throwaway local; parameter/attribute names are
    recovered from the right-hand sides of the original assignments, and
    ``model_type`` from the ``PretrainedConfig`` contract.
    """

    model_type = 'yolos'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # (height, width); list default mirrors upstream, treated read-only
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class __magic_name__ ( lowerCAmelCase_ ):
    """ONNX export configuration for YOLOS.

    NOTE(review): the obfuscated original gave all three properties one shared
    name (only the last survived) and reuses the class name of the config class
    above, shadowing it at module level — the class name is kept but flagged
    for repair; property/attribute names restored per the ``OnnxConfig``
    contract.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis layout of the exported model's inputs.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # Minimum ONNX opset the export supports.
        return 12
| 308 | 0 |
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """``print`` that serializes output across processes by holding an
    exclusive ``flock`` on this source file while printing.

    NOTE(review): the obfuscated original tried to ``open``/``flock`` the
    argument tuple itself; restored to lock a handle on ``__file__``. The name
    ``printflock`` is the one the script body below calls.
    """
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            # always release the lock, even if print raises
            fcntl.flock(fh, fcntl.LOCK_UN)
# Sanity check for a torch.distributed + NCCL + CUDA setup; meant to be launched
# once per GPU (e.g. via torchrun) and prints OK/broken for each rank.
# NOTE(review): every `lowerCAmelCase__ = ...` below looks like a corrupted
# assignment target; the names the script reads (`local_rank`, `device`,
# `hostname`, `gpu`, `rank`, `world_size`) are never bound — confirm and
# restore before running.
lowerCAmelCase__ = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
lowerCAmelCase__ = torch.device('''cuda''', local_rank)
lowerCAmelCase__ = socket.gethostname()
lowerCAmelCase__ = f'''[{hostname}-{local_rank}]'''
try:
    # test distributed
    dist.init_process_group('''nccl''')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    lowerCAmelCase__ = dist.get_rank()
    lowerCAmelCase__ = dist.get_world_size()
    printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
    dist.barrier()
    if rank == 0:
        printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
    printflock(f'''{gpu} is broken''')
    raise
| 153 |
"""simple docstring"""
class Node :
    """A labelled value; heap entries compare by ``val`` only.

    NOTE(review): reconstructed — the obfuscated original declared both
    constructor parameters with one name (a SyntaxError) and bound them to a
    local instead of attributes; names are recovered from the ``self.name`` /
    ``self.val`` references in ``__str__``/``__lt__`` and the demo code below.
    """

    def __init__(self, name, val):
        self.name = name  # display label (also used as the heap's dict key)
        self.val = val    # ordering key

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap :
    """Array-backed min-heap of node objects exposing ``name``, ``val`` and
    ``__lt__``; offers O(1) value lookup by node name via ``heap_dict`` and
    O(log n) ``insert`` / ``remove`` / ``decrease_key``.

    NOTE(review): reconstructed — the obfuscated original gave every method one
    shared name and bound attribute writes to a throwaway local; method names
    are recovered from the ``self.*`` references in the bodies and the class
    name from the demo code below the class.
    """

    def __init__(self, array):
        self.idx_of_element = {}  # node -> current index in self.heap
        self.heap_dict = {}       # node.name -> node.val
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place (Floyd's bottom-up algorithm) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move ``array[idx]`` down until the min-heap property holds."""
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)

            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right

            if smallest == idx:
                break
            array[idx], array[smallest] = array[smallest], array[idx]
            # keep the node -> index map in sync with the swap
            self.idx_of_element[array[idx]] = idx
            self.idx_of_element[array[smallest]] = smallest
            idx = smallest

    def sift_up(self, idx):
        """Move ``self.heap[idx]`` up until the min-heap property holds."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] = p
            self.idx_of_element[self.heap[idx]] = idx
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum element."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] = 0
        self.idx_of_element[self.heap[-1]] = len(self.heap) - 1

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Append ``node`` and restore the heap property."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower ``node``'s value to ``new_value`` (must be smaller) and re-sift."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Demo: build a min-heap of five nodes, decrease one key, and print before/after.
# NOTE(review): the `_snake_case = ...` bindings look like corrupted targets for
# `r`, `b`, `a`, `x`, `e` and `my_min_heap`, which the code below reads; `Node`
# and `MinHeap` must also resolve to the classes above — confirm and restore.
_snake_case : Optional[int] = Node('R', -1)
_snake_case : Tuple = Node('B', 6)
_snake_case : Tuple = Node('A', 3)
_snake_case : Optional[int] = Node('X', 1)
_snake_case : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case : Tuple = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 292 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default value for every common `PretrainedConfig` kwarg; the completeness
# test below checks each key exists on the base config and differs from its
# default. NOTE(review): the binding target looks corrupted — the tests read
# `config_common_kwargs`, which is never defined; confirm and rename.
lowerCamelCase__ = {
    'return_dict': False,
    'output_hidden_states': True,
    'output_attentions': True,
    'torchscript': True,
    'torch_dtype': 'float16',
    'use_bfloat16': True,
    'tf_legacy_loss': True,
    'pruned_heads': {'a': 1},
    'tie_word_embeddings': False,
    'is_decoder': True,
    'cross_attention_hidden_size': 128,
    'add_cross_attention': True,
    'tie_encoder_decoder': True,
    'max_length': 50,
    'min_length': 3,
    'do_sample': True,
    'early_stopping': True,
    'num_beams': 3,
    'num_beam_groups': 3,
    'diversity_penalty': 0.5,
    'temperature': 2.0,
    'top_k': 10,
    'top_p': 0.7,
    'typical_p': 0.2,
    'repetition_penalty': 0.8,
    'length_penalty': 0.8,
    'no_repeat_ngram_size': 5,
    'encoder_no_repeat_ngram_size': 5,
    'bad_words_ids': [1, 2, 3],
    'num_return_sequences': 3,
    'chunk_size_feed_forward': 5,
    'output_scores': True,
    'return_dict_in_generate': True,
    'forced_bos_token_id': 2,
    'forced_eos_token_id': 3,
    'remove_invalid_values': True,
    'architectures': ['BertModel'],
    'finetuning_task': 'translation',
    'id2label': {0: 'label'},
    'label2id': {'label': '0'},
    'tokenizer_class': 'BertTokenizerFast',
    'prefix': 'prefix',
    'bos_token_id': 6,
    'pad_token_id': 7,
    'eos_token_id': 8,
    'sep_token_id': 9,
    'decoder_start_token_id': 10,
    'exponential_decay_length_penalty': (5, 1.01),
    'suppress_tokens': [0, 1],
    'begin_suppress_tokens': 2,
    'task_specific_params': {'translation': 'some_params'},
    'problem_type': 'regression',
}
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
CustomConfig.register_for_auto_class()
_UpperCAmelCase : int = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
    """Behavioural tests for ``PretrainedConfig``: string updates, kwarg
    completeness, subfolder loading, offline caching and file versioning.

    NOTE(review): reconstructed — the obfuscated original gave every method one
    shared name and bound every assignment to a throwaway local; method names
    restored to ``test_*`` so ``unittest`` discovers them, locals from their
    later uses. The expected exception in ``test_from_pretrained_subfolder``
    and the monkey-patched version attribute were lost in obfuscation and are
    taken from upstream — confirm.
    """

    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading a config directly from a URL is deprecated behaviour.
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 362 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate ``sum(poly[i] * x**i)`` term by term.

    NOTE(review): reconstructed — the obfuscated original declared both
    parameters with one shared name (a SyntaxError); the function name and
    parameter names are recovered from the ``__main__`` block below.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule (one multiply-add per
    coefficient, highest degree first).

    NOTE(review): reconstructed — locals and the function name are recovered
    from the body references and the ``__main__`` block below.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 322 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece model fixture shared by all tokenizer tests in this module.
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

# NLLB-200 language-code token ids used by the integration tests below
# (256047 / 256145 correspond to eng_Latn / ron_Latn special tokens).
# NOTE(review): the obfuscated names collide, so the second assignment
# overwrites the first — the tests reference EN_CODE / RO_CODE; confirm
# the intended distinct names.
UpperCAmelCase_ = 25_6047
UpperCAmelCase_ = 25_6145
@require_sentencepiece
@require_tokenizers
class lowercase__ ( __lowerCamelCase , unittest.TestCase ):
    """Unit tests for the slow/fast NLLB tokenizers.

    Covers: raw tokenization with fairseq id offsets, slow/fast save-and-reload
    parity, seq2seq batch preparation, and user-added special tokens.

    NOTE(review): several locals below are read under their original names
    (``tokenizer``, ``tokenizer_r_files``, ``special_token_id`` …) while the
    corresponding assignments were obfuscated to placeholder names — confirm
    against the upstream test file.
    """

    # tokenizer_class / rust_tokenizer_class / test flags / extra-kwargs map
    a : Optional[int] = NllbTokenizer
    a : Dict = NllbTokenizerFast
    a : List[Any] = True
    a : int = True
    a : Dict = {}

    def UpperCamelCase__ ( self ) -> Tuple:
        """Create a tokenizer from the SentencePiece fixture and save it for reuse."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCamelCase__ : Dict = NllbTokenizer(__magic_name__, keep_accents=__magic_name__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase__ ( self ) -> Tuple:
        """Check tokenization, token<->id round-trips (with fairseq offset) and <unk> mapping."""
        UpperCamelCase__ : str = NllbTokenizer(__magic_name__, keep_accents=__magic_name__ )

        UpperCamelCase__ : int = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(__magic_name__, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__magic_name__ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        UpperCamelCase__ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            __magic_name__, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ], )

        UpperCamelCase__ : List[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
        self.assertListEqual(
            __magic_name__, [
                value + tokenizer.fairseq_offset
                # the "9" and "é" pieces map to <unk> (id 2 + offset) in this fixture
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )

        UpperCamelCase__ : str = tokenizer.convert_ids_to_tokens(__magic_name__ )
        self.assertListEqual(
            __magic_name__, [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ], )

    def UpperCamelCase__ ( self ) -> Any:
        """Save slow and fast tokenizers in all legacy/non-legacy formats and reload them."""
        UpperCamelCase__ : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-nllb''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                UpperCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(__magic_name__, **__magic_name__ )
                UpperCamelCase__ : Tuple = self.tokenizer_class.from_pretrained(__magic_name__, **__magic_name__ )

                UpperCamelCase__ : Optional[int] = tempfile.mkdtemp()

                UpperCamelCase__ : int = tokenizer_r.save_pretrained(__magic_name__ )
                UpperCamelCase__ : List[Any] = tokenizer_p.save_pretrained(__magic_name__ )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
                UpperCamelCase__ : List[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
                self.assertSequenceEqual(__magic_name__, __magic_name__ )

                # Checks everything loads correctly in the same way
                UpperCamelCase__ : str = tokenizer_r.from_pretrained(__magic_name__ )
                UpperCamelCase__ : Optional[int] = tokenizer_p.from_pretrained(__magic_name__ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__magic_name__, __magic_name__ ) )

                shutil.rmtree(__magic_name__ )

                # Save tokenizer rust, legacy_format=True
                UpperCamelCase__ : Optional[Any] = tempfile.mkdtemp()

                UpperCamelCase__ : Tuple = tokenizer_r.save_pretrained(__magic_name__, legacy_format=__magic_name__ )
                UpperCamelCase__ : List[str] = tokenizer_p.save_pretrained(__magic_name__ )

                # Checks it save with the same files
                self.assertSequenceEqual(__magic_name__, __magic_name__ )

                # Checks everything loads correctly in the same way
                UpperCamelCase__ : Tuple = tokenizer_r.from_pretrained(__magic_name__ )
                UpperCamelCase__ : List[str] = tokenizer_p.from_pretrained(__magic_name__ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__magic_name__, __magic_name__ ) )

                shutil.rmtree(__magic_name__ )

                # Save tokenizer rust, legacy_format=False
                UpperCamelCase__ : int = tempfile.mkdtemp()

                UpperCamelCase__ : Tuple = tokenizer_r.save_pretrained(__magic_name__, legacy_format=__magic_name__ )
                UpperCamelCase__ : List[str] = tokenizer_p.save_pretrained(__magic_name__ )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                UpperCamelCase__ : Dict = tokenizer_r.from_pretrained(__magic_name__ )
                UpperCamelCase__ : Union[str, Any] = tokenizer_p.from_pretrained(__magic_name__ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__magic_name__, __magic_name__ ) )

                shutil.rmtree(__magic_name__ )

    @require_torch
    def UpperCamelCase__ ( self ) -> int:
        """Exercise prepare_seq2seq_batch: truncation lengths for inputs, labels, encoder-only batches."""
        if not self.test_seqaseq:
            return

        UpperCamelCase__ : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Longer text that will definitely require truncation.
                UpperCamelCase__ : str = [
                    ''' UN Chief Says There Is No Military Solution in Syria''',
                    ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
                    ''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
                    ''' will only worsen the violence and misery for millions of people.''',
                ]
                UpperCamelCase__ : List[str] = [
                    '''Şeful ONU declară că nu există o soluţie militară în Siria''',
                    '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
                    ''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
                    ''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
                ]
                try:
                    UpperCamelCase__ : Any = tokenizer.prepare_seqaseq_batch(
                        src_texts=__magic_name__, tgt_texts=__magic_name__, max_length=3, max_target_length=10, return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''ron_Latn''', )
                except NotImplementedError:
                    # tokenizers that do not support seq2seq batching are skipped
                    return
                self.assertEqual(batch.input_ids.shape[1], 3 )
                self.assertEqual(batch.labels.shape[1], 10 )

                # max_target_length will default to max_length if not specified
                UpperCamelCase__ : str = tokenizer.prepare_seqaseq_batch(
                    __magic_name__, tgt_texts=__magic_name__, max_length=3, return_tensors='''pt''' )
                self.assertEqual(batch.input_ids.shape[1], 3 )
                self.assertEqual(batch.labels.shape[1], 3 )

                UpperCamelCase__ : Dict = tokenizer.prepare_seqaseq_batch(
                    src_texts=__magic_name__, max_length=3, max_target_length=10, return_tensors='''pt''' )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3 )
                self.assertNotIn('''decoder_input_ids''', __magic_name__ )

    @unittest.skip('''Unfortunately way too slow to build a BPE with SentencePiece.''' )
    def UpperCamelCase__ ( self ) -> str:
        """Skipped: building a BPE vocabulary with SentencePiece is too slow for CI."""
        pass

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        """Check that additional special tokens passed at init encode identically on slow/fast."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                UpperCamelCase__ : Dict = [AddedToken('''<special>''', lstrip=__magic_name__ )]

                UpperCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    __magic_name__, additional_special_tokens=__magic_name__, **__magic_name__ )
                UpperCamelCase__ : List[str] = tokenizer_r.encode('''Hey this is a <special> token''' )

                UpperCamelCase__ : Dict = tokenizer_r.encode('''<special>''', add_special_tokens=__magic_name__ )[0]

                self.assertTrue(special_token_id in r_output )

                if self.test_slow_tokenizer:
                    UpperCamelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
                        __magic_name__, additional_special_tokens=__magic_name__, **__magic_name__, )
                    UpperCamelCase__ : List[str] = self.tokenizer_class.from_pretrained(
                        __magic_name__, additional_special_tokens=__magic_name__, **__magic_name__ )

                    UpperCamelCase__ : int = tokenizer_p.encode('''Hey this is a <special> token''' )

                    UpperCamelCase__ : List[str] = tokenizer_cr.encode('''Hey this is a <special> token''' )

                    self.assertEqual(__magic_name__, __magic_name__ )
                    self.assertEqual(__magic_name__, __magic_name__ )
                    self.assertTrue(special_token_id in p_output )
                    self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    """Integration tests against the real facebook/nllb-200-distilled-600M checkpoint.

    Verifies language-code ids, English->Romanian encoding/decoding, truncation,
    save/reload of the fairseq vocabulary, and target-side batching with
    `shift_tokens_right`.

    NOTE(review): obfuscation collapsed the class attributes to a single name
    ``a`` and stripped several local/constant names (``RO_CODE``, ``EN_CODE``,
    ``src_text``, ``targets`` …) that the test bodies still reference — confirm
    against the upstream test file.
    """

    # checkpoint name / English source texts / Romanian references / expected source ids
    a : Union[str, Any] = "facebook/nllb-200-distilled-600M"
    a : str = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    a : Optional[int] = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    a : Union[str, Any] = [
        25_6047,
        1_6297,
        13_4408,
        8165,
        24_8066,
        1_4734,
        950,
        1135,
        10_5721,
        3573,
        83,
        2_7352,
        108,
        4_9486,
        2,
    ]

    @classmethod
    def UpperCamelCase__ ( cls ) -> Optional[int]:
        """Download the checkpoint tokenizer once for the whole test class (eng_Latn -> ron_Latn)."""
        UpperCamelCase__ : NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''eng_Latn''', tgt_lang='''ron_Latn''' )
        UpperCamelCase__ : int = 1
        return cls

    def UpperCamelCase__ ( self ) -> Dict:
        """Spot-check a few fairseq language-code token ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''], 256001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''], 256002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''], 256057 )

    def UpperCamelCase__ ( self ) -> Optional[int]:
        """Encoding the first source sentence must reproduce the expected token ids."""
        UpperCamelCase__ : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, __magic_name__ )

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        """Decoding with skip_special_tokens must drop the language code and EOS."""
        self.assertIn(__magic_name__, self.tokenizer.all_special_ids )
        # fmt: off
        UpperCamelCase__ : Optional[int] = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on

        UpperCamelCase__ : List[str] = self.tokenizer.decode(__magic_name__, skip_special_tokens=__magic_name__ )
        UpperCamelCase__ : str = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=__magic_name__ )
        self.assertEqual(__magic_name__, __magic_name__ )
        self.assertNotIn(self.tokenizer.eos_token, __magic_name__ )

    def UpperCamelCase__ ( self ) -> List[str]:
        """Truncation must cap length while keeping language code first and EOS last."""
        UpperCamelCase__ : Any = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], __magic_name__ )
        UpperCamelCase__ : Any = 10
        UpperCamelCase__ : int = self.tokenizer(__magic_name__, max_length=__magic_name__, truncation=__magic_name__ ).input_ids[0]
        self.assertEqual(ids[-1], 2 )
        self.assertEqual(ids[0], __magic_name__ )
        self.assertEqual(len(__magic_name__ ), __magic_name__ )

    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """Unknown language codes like ar_AR map to <unk> (id 3); <mask> keeps its id."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ), [256203, 3] )

    def UpperCamelCase__ ( self ) -> Any:
        """save_pretrained / from_pretrained must round-trip the fairseq vocabulary."""
        UpperCamelCase__ : int = tempfile.mkdtemp()
        UpperCamelCase__ : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__magic_name__ )
        UpperCamelCase__ : List[Any] = NllbTokenizer.from_pretrained(__magic_name__ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, __magic_name__ )

    @require_torch
    def UpperCamelCase__ ( self ) -> List[str]:
        """Full src+tgt batch: shapes, decoder inputs via shift_tokens_right, prefix/suffix reset."""
        UpperCamelCase__ : Dict = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=__magic_name__, truncation=__magic_name__, max_length=len(self.expected_src_tokens ), return_tensors='''pt''', )
        UpperCamelCase__ : Union[str, Any] = shift_tokens_right(
            batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id['''ron_Latn'''] )

        self.assertIsInstance(__magic_name__, __magic_name__ )

        self.assertEqual((2, 15), batch.input_ids.shape )
        self.assertEqual((2, 15), batch.attention_mask.shape )
        UpperCamelCase__ : Union[str, Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, __magic_name__ )
        self.assertEqual(__magic_name__, batch.decoder_input_ids[0, 0] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )

    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        """Source and target sides may be truncated to different max lengths."""
        UpperCamelCase__ : List[str] = self.tokenizer(self.src_text, padding=__magic_name__, truncation=__magic_name__, max_length=3, return_tensors='''pt''' )
        UpperCamelCase__ : Dict = self.tokenizer(
            text_target=self.tgt_text, padding=__magic_name__, truncation=__magic_name__, max_length=10, return_tensors='''pt''' )
        UpperCamelCase__ : str = targets['''input_ids''']
        UpperCamelCase__ : List[str] = shift_tokens_right(
            __magic_name__, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )

        self.assertEqual(batch.input_ids.shape[1], 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1], 10 )

    @require_torch
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """_build_translation_inputs must emit input_ids, attention_mask and forced_bos_token_id."""
        UpperCamelCase__ : str = self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )

        self.assertEqual(
            nested_simplify(__magic_name__ ), {
                # A, test, EOS, en_XX
                '''input_ids''': [[256047, 70, 7356, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 256057,
            }, )

    @require_torch
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        """legacy_behaviour toggles whether the language code goes after (legacy) or before the text."""
        UpperCamelCase__ : Optional[int] = True
        UpperCamelCase__ : Dict = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )

        UpperCamelCase__ : Any = False
        UpperCamelCase__ : Dict = self.tokenizer(
            '''UN Chief says there is no military solution in Syria''', src_lang='''eng_Latn''', tgt_lang='''fra_Latn''' )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
| 201 |
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` with ``nums[i] + nums[j] == target``, else ``[]``.

    Classic two-pointer scan from both ends, so ``nums`` MUST be sorted in
    ascending order; otherwise the result is undefined. O(n) time, O(1) space.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([1, 2, 3], 100)
    []
    """
    lo = 0
    hi = len(nums) - 1
    while lo < hi:
        pair_sum = nums[lo] + nums[hi]
        if pair_sum == target:
            return [lo, hi]
        if pair_sum < target:
            lo += 1  # need a larger sum: advance the left pointer
        else:
            hi -= 1  # need a smaller sum: retreat the right pointer
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 201 | 1 |
"""Brute-force decryption of a Caesar cipher over uppercase ASCII letters."""
import string


def decrypt(message: str) -> list[str]:
    """Print (and return) the Caesar decryption of ``message`` for every key.

    Only uppercase ASCII letters are shifted; every other character is passed
    through unchanged. Returns the 26 candidate plaintexts indexed by key —
    a backward-compatible addition (the original returned None and only
    printed), useful for programmatic callers.
    """
    alphabet = string.ascii_uppercase
    candidates: list[str] = []
    for key in range(len(alphabet)):
        translated = ""
        for symbol in message:
            if symbol in alphabet:
                # Shift back by `key`, wrapping around the alphabet.
                num = alphabet.find(symbol) - key
                if num < 0:
                    num += len(alphabet)
                translated += alphabet[num]
            else:
                translated += symbol
        print(f"Decryption using Key #{key}: {translated}")
        candidates.append(translated)
    return candidates


def main() -> None:
    """Read an encrypted message from stdin and show all 26 Caesar decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 349 |
"""Launch helper that spawns a training script across TPU cores via torch_xla."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Build and parse the CLI: core count, target script path, and its arguments."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program: capture everything after the script path
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main() -> None:
    """Import the training script as a module and spawn its ``_mp_fn`` on each core."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the target script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 349 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> List[str]:
_lowerCAmelCase : Union[str, Any] = len(lowerCamelCase__ )
for i in range(length - 1 ):
_lowerCAmelCase : List[str] = i
for k in range(i + 1 ,lowerCamelCase__ ):
if collection[k] < collection[least]:
_lowerCAmelCase : str = k
if least != i:
_lowerCAmelCase : Any = (collection[i], collection[least])
return collection
if __name__ == "__main__":
_a : Any = input('Enter numbers separated by a comma:\n').strip()
_a : List[Any] = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
| 44 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
# Module-level logger shared by the HANS dataset helpers below.
lowerCAmelCase__ = logging.getLogger(__name__)


@dataclass(frozen=_UpperCamelCase )
class snake_case__:
    """A single raw HANS example (premise/hypothesis pair).

    NOTE(review): obfuscation collapsed the field names to ``lowercase_`` and the
    annotations to ``42``; judging by the ``InputExample(guid=..., text_a=...,
    text_b=..., label=..., pairID=...)`` construction further down, the five
    fields are guid, text_a, text_b, label, pairID — confirm upstream.
    """
    lowercase_ = 42
    lowercase_ = 42
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None
@dataclass(frozen=_UpperCamelCase )
class snake_case__:
    """A tokenized HANS example ready for the model.

    NOTE(review): field names/annotations were obfuscated; the
    ``InputFeatures(**inputs, label=..., pairID=...)`` construction below
    suggests input_ids (required) plus optional attention_mask,
    token_type_ids, label and pairID — confirm upstream.
    """
    lowercase_ = 42
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None
    lowercase_ = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class snake_case__(_UpperCamelCase ):
        """PyTorch Dataset over HANS features, cached on disk per tokenizer/length/task.

        NOTE(review): obfuscation stripped most local names; bodies read
        ``task``, ``max_seq_length``, ``label_list``, ``examples`` etc. that the
        placeholder assignments no longer bind — confirm upstream.
        """
        lowercase_ = 42

        def __init__( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : bool = False , ):
            # Resolve the processor for the task and the cache-file path.
            lowercase__ : List[str] = hans_processors[task]()
            lowercase__ : Dict = os.path.join(
                SCREAMING_SNAKE_CASE , "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , ) , )
            lowercase__ : List[str] = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                lowercase__ , lowercase__ : Union[str, Any] = label_list[2], label_list[1]
            lowercase__ : Any = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lowercase__ : int = cached_features_file + ".lock"
            with FileLock(SCREAMING_SNAKE_CASE ):
                if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""" )
                    lowercase__ : Any = torch.load(SCREAMING_SNAKE_CASE )
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""" )

                    lowercase__ : List[str] = (
                        processor.get_dev_examples(SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE )
                    )

                    logger.info("Training examples: %s" , len(SCREAMING_SNAKE_CASE ) )
                    lowercase__ : Tuple = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                    logger.info("Saving features into cached file %s" , SCREAMING_SNAKE_CASE )
                    torch.save(self.features , SCREAMING_SNAKE_CASE )

        def __len__( self : List[Any] ):
            # Number of tokenized examples.
            return len(self.features )

        def __getitem__( self : str , SCREAMING_SNAKE_CASE : List[str] ):
            # Index into the cached features.
            return self.features[i]

        def snake_case ( self : Any ):
            """Return the (possibly RoBERTa-reordered) label list."""
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class snake_case__:
        """TensorFlow counterpart of the HANS dataset: wraps the converted features
        in a ``tf.data.Dataset`` built from a Python generator.

        NOTE(review): like the torch variant, obfuscation stripped the names the
        bodies read (``task``, ``examples``, ``label_list`` …) — confirm upstream.
        """
        lowercase_ = 42

        def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] = 128 , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : bool = False , ):
            lowercase__ : str = hans_processors[task]()
            lowercase__ : Union[str, Any] = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                lowercase__ , lowercase__ : str = label_list[2], label_list[1]
            lowercase__ : Optional[int] = label_list

            lowercase__ : Any = processor.get_dev_examples(SCREAMING_SNAKE_CASE ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE )
            lowercase__ : Optional[int] = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

            def gen():
                # Yield (inputs-dict, label) pairs in feature order for tf.data.
                for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(SCREAMING_SNAKE_CASE )) )

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            lowercase__ : Optional[int] = tf.data.Dataset.from_generator(
                SCREAMING_SNAKE_CASE , (
                    {
                        "example_id": tf.intaa,
                        "input_ids": tf.intaa,
                        "attention_mask": tf.intaa,
                        "token_type_ids": tf.intaa,
                    },
                    tf.intaa,
                ) , (
                    {
                        "example_id": tf.TensorShape([] ),
                        "input_ids": tf.TensorShape([None, None] ),
                        "attention_mask": tf.TensorShape([None, None] ),
                        "token_type_ids": tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )

        def snake_case ( self : int ):
            """Return the underlying tf.data.Dataset."""
            return self.dataset

        def __len__( self : List[str] ):
            return len(self.features )

        def __getitem__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
            return self.features[i]

        def snake_case ( self : Any ):
            """Return the (possibly RoBERTa-reordered) label list."""
            return self.label_list
class snake_case__(_UpperCamelCase ):
    """DataProcessor for the HANS heuristics dataset (TSV files on disk)."""

    def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int ):
        """Read heuristics_train_set.txt from the data dir and build train examples."""
        return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE , "heuristics_train_set.txt" ) ) , "train" )

    def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Any ):
        """Read heuristics_evaluation_set.txt from the data dir and build dev examples."""
        return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE , "heuristics_evaluation_set.txt" ) ) , "dev" )

    def snake_case ( self : Union[str, Any] ):
        """The three NLI labels used by HANS."""
        return ["contradiction", "entailment", "neutral"]

    def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
        """Turn TSV rows into InputExample objects, skipping the header row.

        Columns 5/6 hold the sentence pair; column 7 holds the label (with an
        optional ``ex`` prefix stripped); column 0 is reused as guid suffix and pairID.
        """
        lowercase__ : Dict = []
        for i, line in enumerate(SCREAMING_SNAKE_CASE ):
            if i == 0:
                continue  # header row
            lowercase__ : str = "%s-%s" % (set_type, line[0])
            lowercase__ : str = line[5]
            lowercase__ : List[str] = line[6]
            lowercase__ : Dict = line[7][2:] if line[7].startswith("ex" ) else line[7]
            lowercase__ : Union[str, Any] = line[0]
            examples.append(InputExample(guid=SCREAMING_SNAKE_CASE , text_a=SCREAMING_SNAKE_CASE , text_b=SCREAMING_SNAKE_CASE , label=SCREAMING_SNAKE_CASE , pairID=SCREAMING_SNAKE_CASE ) )
        return examples
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
    """Tokenize HANS examples into InputFeatures.

    NOTE(review): obfuscation removed the original parameter names; from the
    body these are (examples, label_list, max_length, tokenizer) and the call
    sites above name this function ``hans_convert_examples_to_features`` —
    confirm upstream. Examples with an unknown label fall back to label 0.
    """
    lowercase__ : str = {label: i for i, label in enumerate(lowerCamelCase__ )}

    lowercase__ : str = []
    for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )

        # Pad/truncate each premise-hypothesis pair to max_length.
        lowercase__ : Any = tokenizer(
            example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="max_length" , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , )

        lowercase__ : Optional[int] = label_map[example.label] if example.label in label_map else 0

        lowercase__ : Any = int(example.pairID )
        features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) )

    # Log the first few converted examples for debugging.
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )

    return features
# Task name -> number of labels (HANS uses the three NLI labels).
lowerCAmelCase__ = {
    '''hans''': 3,
}

# Task name -> DataProcessor class; looked up as ``hans_processors[task]`` above.
lowerCAmelCase__ = {
    '''hans''': HansProcessor,
}
| 130 | 0 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __A :
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
snake_case_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
snake_case_ = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
snake_case_ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
snake_case_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
snake_case_ = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
snake_case_ = DDPMScheduler(
num_train_timesteps=1_000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , )
torch.manual_seed(0 )
snake_case_ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = inputs['prompt']
snake_case_ = inputs['generator']
snake_case_ = inputs['num_inference_steps']
snake_case_ = inputs['output_type']
if "image" in inputs:
snake_case_ = inputs['image']
else:
snake_case_ = None
if "mask_image" in inputs:
snake_case_ = inputs['mask_image']
else:
snake_case_ = None
if "original_image" in inputs:
snake_case_ = inputs['original_image']
else:
snake_case_ = None
snake_case_ = pipe.encode_prompt(UpperCAmelCase_ )
# inputs with prompt converted to embeddings
snake_case_ = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
snake_case_ = image
if mask_image is not None:
snake_case_ = mask_image
if original_image is not None:
snake_case_ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pipe(**UpperCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCAmelCase_ )
snake_case_ = self.pipeline_class.from_pretrained(UpperCAmelCase_ )
pipe_loaded.to(UpperCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCAmelCase_ , UpperCAmelCase_ ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = inputs['generator']
snake_case_ = inputs['num_inference_steps']
snake_case_ = inputs['output_type']
# inputs with prompt converted to embeddings
snake_case_ = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
snake_case_ = image
if mask_image is not None:
snake_case_ = mask_image
if original_image is not None:
snake_case_ = original_image
snake_case_ = pipe_loaded(**UpperCAmelCase_ )[0]
snake_case_ = np.abs(to_np(UpperCAmelCase_ ) - to_np(UpperCAmelCase_ ) ).max()
self.assertLess(UpperCAmelCase_ , 1E-4 )
    def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
        """Save/load round-trip test: run the pipeline, serialize it, reload it,
        and check that outputs before and after the round trip agree.

        NOTE(review): the original local names were lost in a mechanical rename —
        every local is assigned to ``snake_case_`` while later lines read
        ``pipe``, ``pipe_loaded``, etc., so this method cannot run as written.
        """
        # Build the pipeline from dummy components and run it once.
        snake_case_ = self.get_dummy_components()
        snake_case_ = self.pipeline_class(**UpperCAmelCase_ )
        pipe.to(UpperCAmelCase_ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
        snake_case_ = pipe(**UpperCAmelCase_ )[0]
        # Serialize to a temporary directory and reload via from_pretrained.
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(UpperCAmelCase_ )
            snake_case_ = self.pipeline_class.from_pretrained(UpperCAmelCase_ )
            pipe_loaded.to(UpperCAmelCase_ )
            pipe_loaded.set_progress_bar_config(disable=UpperCAmelCase_ )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        # Re-run on fresh dummy inputs and compare against the first output.
        snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
        snake_case_ = pipe_loaded(**UpperCAmelCase_ )[0]
        snake_case_ = np.abs(to_np(UpperCAmelCase_ ) - to_np(UpperCAmelCase_ ) ).max()
        # Max absolute element-wise difference must stay below 1e-4.
        self.assertLess(UpperCAmelCase_ , 1E-4 )
| 368 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Example scripts skipped by the comparison tests in the class below.
# NOTE(review): presumably these lack the main()/training_function() sections
# being diffed, or need extra infra (DeepSpeed, Megatron-LM) — confirm.
__SCREAMING_SNAKE_CASE : Tuple = [
    'cross_validation.py',
    'gradient_accumulation.py',
    'local_sgd.py',
    'multi_process_metrics.py',
    'memory.py',
    'automatic_gradient_accumulation.py',
    'fsdp_with_peak_mem_tracking.py',
    'deepspeed_with_config_support.py',
    'megatron_lm_gpt_pretraining.py',
]
class __A (unittest.TestCase):
    '''Diffs each `examples/by_feature` script against the corresponding
    complete example to verify they stay in sync.

    NOTE(review): the obfuscation collapsed distinct locals into
    ``snake_case_`` and duplicated the parameter name ``UpperCAmelCase_``
    (a SyntaxError), so this class cannot run as written.
    '''
    def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool , UpperCAmelCase_ : str = None , UpperCAmelCase_ : list = None ) ->List[Any]:
        """Compare one complete example script against its by_feature scripts;
        asserts the computed diff is empty after stripping special strings."""
        snake_case_ = None
        snake_case_ = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
        snake_case_ = os.path.abspath("""examples""" )
        # Walk every by_feature script that is not explicitly excluded.
        for item in os.listdir(UpperCAmelCase_ ):
            if item not in EXCLUDE_EXAMPLES:
                snake_case_ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
                if os.path.isfile(UpperCAmelCase_ ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=UpperCAmelCase_ , feature_script=UpperCAmelCase_ , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        snake_case_ = compare_against_test(
                            os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
                        snake_case_ = """\n""".join(UpperCAmelCase_ )
                        # Strip strings that are expected to differ before asserting.
                        if special_strings is not None:
                            for string in special_strings:
                                snake_case_ = diff.replace(UpperCAmelCase_ , """""" )
                        self.assertEqual(UpperCAmelCase_ , """""" )
    def lowerCAmelCase ( self : Tuple ) ->List[Any]:
        """Check the NLP complete example in both parser-only and full modes."""
        self.one_complete_example("""complete_nlp_example.py""" , UpperCAmelCase_ )
        self.one_complete_example("""complete_nlp_example.py""" , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Tuple ) ->Dict:
        """Check the CV complete example; special_strings lists formatting-only
        differences to ignore in the diff."""
        snake_case_ = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
        snake_case_ = [
            """ """ * 16 + """{\n\n""",
            """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
            """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
            """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
            """ """ * 20 + """\"epoch\": epoch,\n\n""",
            """ """ * 16 + """},\n\n""",
            """ """ * 16 + """step=epoch,\n""",
            """ """ * 12,
            """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
        ]
        self.one_complete_example("""complete_cv_example.py""" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        self.one_complete_example("""complete_cv_example.py""" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class __A (snake_case__):
    '''Launches each `examples/by_feature` script via `accelerate launch`
    against mocked dataloaders and checks its observable side effects
    (checkpoints written, resume behaviour, tracking dirs, metrics).

    NOTE(review): locals are mangled to ``snake_case_`` while later lines read
    ``testargs``/``output`` etc., so this class cannot run as written.
    '''
    __lowercase: str = False
    @classmethod
    def lowerCAmelCase ( cls : Any ) ->List[str]:
        """Create a shared temp dir, write a default accelerate config, and
        build the common `accelerate launch` argument prefix."""
        super().setUpClass()
        snake_case_ = tempfile.mkdtemp()
        snake_case_ = os.path.join(cls._tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        snake_case_ = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
    @classmethod
    def lowerCAmelCase ( cls : List[str] ) ->int:
        """Remove the shared temp dir created in setUpClass."""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
    def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
        """Per-epoch checkpointing should create an epoch_0 directory."""
        snake_case_ = F"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
    def lowerCAmelCase ( self : Any ) ->Optional[Any]:
        """Per-step checkpointing should create a step_2 directory."""
        snake_case_ = F"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        snake_case_ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
    def lowerCAmelCase ( self : Union[str, Any] ) ->str:
        """Resuming from the epoch_0 checkpoint must skip epoch 0."""
        snake_case_ = F"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
        """.split()
        snake_case_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ )
        self.assertNotIn("""epoch 0:""" , UpperCAmelCase_ )
        self.assertIn("""epoch 1:""" , UpperCAmelCase_ )
    def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
        """Resuming from step_2: with >1 process epoch 0 is skipped entirely,
        otherwise part of epoch 0 is replayed."""
        snake_case_ = F"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
        """.split()
        snake_case_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ )
        if torch.cuda.is_available():
            snake_case_ = torch.cuda.device_count()
        else:
            snake_case_ = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" , UpperCAmelCase_ )
            self.assertIn("""epoch 1:""" , UpperCAmelCase_ )
        else:
            self.assertIn("""epoch 0:""" , UpperCAmelCase_ )
            self.assertIn("""epoch 1:""" , UpperCAmelCase_ )
    @slow
    def lowerCAmelCase ( self : Dict ) ->Dict:
        """Cross-validation example: parse the last accuracy dict printed to
        stdout and require accuracy >= 0.75 (runs with real dataloaders)."""
        snake_case_ = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            snake_case_ = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase_ )
            snake_case_ = re.findall("""({.+})""" , UpperCAmelCase_ )
            snake_case_ = [r for r in results if """accuracy""" in r][-1]
            snake_case_ = ast.literal_eval(UpperCAmelCase_ )
            self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
    def lowerCAmelCase ( self : Optional[int] ) ->int:
        """Smoke-test the multi-process metrics example."""
        snake_case_ = ["""examples/by_feature/multi_process_metrics.py"""]
        run_command(self._launch_args + testargs )
    @require_trackers
    @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
        """Tracking example must create a `tracking` directory in project_dir."""
        with tempfile.TemporaryDirectory() as tmpdir:
            snake_case_ = F"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , """tracking""" ) ) )
    def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
        """Smoke-test the gradient accumulation example."""
        snake_case_ = ["""examples/by_feature/gradient_accumulation.py"""]
        run_command(self._launch_args + testargs )
    def lowerCAmelCase ( self : str ) ->Dict:
        """Smoke-test the local SGD example."""
        snake_case_ = ["""examples/by_feature/local_sgd.py"""]
        run_command(self._launch_args + testargs )
| 233 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for this DPT conversion script.
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
def lowercase_ ( _snake_case ):
    """Build the DPTConfig matching the checkpoint at URL *_snake_case*.

    Returns a ``(config, expected_shape)`` tuple, where ``expected_shape`` is
    the output shape used later to sanity-check the converted model.

    Fixes: the obfuscated original assigned every value to a throwaway name,
    so ``config`` and ``expected_shape`` were undefined at ``return``
    (NameError); the label file was also opened without being closed.
    NOTE(review): config attribute names follow transformers' DPTConfig —
    confirm against the installed transformers version.
    """
    config = DPTConfig()
    if "large" in _snake_case:
        # ViT-Large backbone hyper-parameters.
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)
    if "ade" in _snake_case:
        # ADE20k semantic-segmentation head: 150 classes with hub label map.
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        label_path = cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset"""))
        with open(label_path, """r""") as f:
            idalabel = json.load(f)
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    # NOTE(review): a URL that is neither "large" nor "ade" leaves
    # expected_shape unbound, mirroring the upstream script's behaviour.
    return config, expected_shape
def lowercase_ ( _snake_case ):
    """Remove the original classification-head keys from state dict
    *_snake_case*, in place.

    Fixes: the obfuscated original never defined ``ignore_keys`` or
    ``state_dict`` (NameError on every call).
    """
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        # pop with a default so an already-missing key does not raise
        _snake_case.pop(k, None)
def lowercase_ ( _snake_case ):
    """Translate one original DPT state-dict key *_snake_case* into the
    corresponding transformers DPT key and return it.

    Fixes: the obfuscated original assigned every replacement to a throwaway
    name, so ``name`` was never updated (NameError / wrong result).
    """
    name = _snake_case
    # Encoder blocks (everything except the embedding tensors).
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""", """dpt.encoder""")
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""", """dpt.embeddings""")
    if "patch_embed" in name:
        name = name.replace("""patch_embed""", """patch_embeddings""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """position_embeddings""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""", """projection""")
    if "blocks" in name:
        name = name.replace("""blocks""", """layer""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""", """head""")
    if "scratch" in name:
        name = name.replace("""scratch""", """neck""")
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""", """convs.0""")
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""", """convs.1""")
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""", """convs.2""")
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""", """convs.3""")
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""") : len("""neck.refinenet""") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''', f'''fusion_stage.layers.{abs(layer_idx-4 )}''')
    if "out_conv" in name:
        name = name.replace("""out_conv""", """projection""")
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""", """residual_layer1""")
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""", """residual_layer2""")
    if "conv1" in name:
        name = name.replace("""conv1""", """convolution1""")
    if "conv2" in name:
        name = name.replace("""conv2""", """convolution2""")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""")
    if "pretrained" in name:
        name = name.replace("""pretrained""", """dpt""")
    if "bn" in name:
        name = name.replace("""bn""", """batch_norm""")
    if "head" in name:
        name = name.replace("""head""", """head.head""")
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""", """layernorm""")
    if "auxlayer" in name:
        name = name.replace("""auxlayer""", """auxiliary_head.head""")
    return name
def lowercase_ ( state_dict, config ):
    """Split each fused qkv projection in *state_dict* into separate query,
    key and value tensors, in place.

    Fixes: the obfuscated original declared two parameters with the same
    name (a SyntaxError) and discarded every computed slice.
    NOTE(review): parameter order (state_dict, config) follows the upstream
    transformers conversion script — confirm against the call site.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
    """Download the standard COCO cats test image and return it as a PIL image.

    Fixes: the obfuscated original referenced the undefined name
    ``_snake_case`` for both the URL and the ``stream`` flag (NameError).
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # stream=True so the raw response can be handed straight to PIL
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowercase_ ( checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name ):
    """Convert an original DPT checkpoint at *checkpoint_url* to a
    transformers model, verify its outputs, save it to
    *pytorch_dump_folder_path*, and optionally push it to the hub.

    Fixes: the obfuscated original declared four parameters with the same
    name (a SyntaxError) and discarded every local assignment.
    NOTE(review): the __main__ block calls this as ``convert_dpt_checkpoint``;
    the mangled name ``lowercase_`` is kept here to leave the interface of
    this definition unchanged.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if """ade""" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="""pt""")
    # forward pass
    outputs = model(**encoding).logits if """ade""" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing model to hub...""")
        # NOTE(review): use_temp_dir=True mirrors the upstream script — confirm.
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True,)
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``UpperCAmelCase__`` but the
    # add_argument calls read ``parser``, and the final call targets
    # ``convert_dpt_checkpoint`` / ``args`` — names this mangled file never
    # defines, so the CLI entry point cannot run as written.
    UpperCAmelCase__ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    UpperCAmelCase__ : Any = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 25 |
'''simple docstring'''
def lowerCAmelCase (denominations, value):
    """Greedy change-making: return the list of coins (largest first) whose
    sum equals *value*, drawing from ascending *denominations*.

    *value* may be an int or a numeric string.

    Fixes: the obfuscated original declared two parameters with the same
    name (a SyntaxError) and never defined ``total_value``/``answer``.
    """
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as still fit
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
# NOTE(review): locals are mangled to ``lowercase_`` while later lines read
# ``denominations``/``n``/``value``/``answer``, so this interactive driver
# cannot run as written.
if __name__ == "__main__":
    lowercase_ = []
    lowercase_ = "0"
    # Either read denominations from the user...
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        lowercase_ = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        lowercase_ = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        lowercase_ = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        lowercase_ = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(F"""Following is minimal change for {value}: """)
        lowercase_ = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 211 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowerCamelCase = TypeVar('''T''')
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
    """Return the parent index of the binary-heap node at *UpperCAmelCase__*.

    Fixes: the original body read the undefined name ``position``.
    """
    return (UpperCAmelCase__ - 1) // 2
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
    """Return the left-child index of the binary-heap node at *UpperCAmelCase__*.

    Fixes: the original body read the undefined name ``position``.
    """
    return (2 * UpperCAmelCase__) + 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
    """Return the right-child index of the binary-heap node at *UpperCAmelCase__*.

    Fixes: the original body read the undefined name ``position``.
    """
    return (2 * UpperCAmelCase__) + 2
class A__ ( Generic[T] ):
    # Min-priority queue backed by a binary heap plus an element->index map so
    # update_key runs in O(log n).
    # NOTE(review): every local is mangled to ``A_`` while later lines read
    # ``elem``/``weight``/``position`` etc., so the class cannot run as written.
    def __init__( self ) -> None:
        '''Create an empty queue: heap list, position map, element count.'''
        A_ = []
        A_ = {}
        A_ = 0
    def __len__( self ) -> int:
        '''Number of elements currently stored.'''
        return self.elements
    def __repr__( self ) -> str:
        '''Debug representation: the raw (elem, weight) heap list.'''
        return str(self.heap )
    def snake_case_ ( self ) -> bool:
        '''True when the queue holds no elements (is_empty).'''
        return self.elements == 0
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> None:
        '''Push (elem, weight) and restore the heap invariant upward.'''
        self.heap.append((elem, weight) )
        A_ = self.elements
        self.elements += 1
        self._bubble_up(UpperCamelCase__ )
    def snake_case_ ( self ) -> T:
        '''simple docstring'''
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        A_ , A_ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            A_ , A_ = self.heap[0]
            self._bubble_down(UpperCamelCase__ )
        return elem
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> None:
        '''Update an element's weight and re-heapify in the right direction.'''
        A_ = self.position_map[elem]
        A_ = (elem, weight)
        if position > 0:
            A_ = get_parent_position(UpperCamelCase__ )
            A_ , A_ = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(UpperCamelCase__ )
            else:
                self._bubble_down(UpperCamelCase__ )
        else:
            self._bubble_down(UpperCamelCase__ )
    def snake_case_ ( self , UpperCamelCase__ ) -> None:
        '''Sift the element up while it is lighter than its parent (_bubble_up).'''
        A_ = self.position_map[elem]
        if curr_pos == 0:
            return None
        A_ = get_parent_position(UpperCamelCase__ )
        A_ , A_ = self.heap[curr_pos]
        A_ , A_ = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(UpperCamelCase__ , UpperCamelCase__ )
            return self._bubble_up(UpperCamelCase__ )
        return None
    def snake_case_ ( self , UpperCamelCase__ ) -> None:
        '''Sift the element down toward its lighter child (_bubble_down).'''
        A_ = self.position_map[elem]
        A_ , A_ = self.heap[curr_pos]
        A_ = get_child_left_position(UpperCamelCase__ )
        A_ = get_child_right_position(UpperCamelCase__ )
        # Two children: swap with the right child when it is the lightest.
        if child_left_position < self.elements and child_right_position < self.elements:
            A_ , A_ = self.heap[child_left_position]
            A_ , A_ = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(UpperCamelCase__ , UpperCamelCase__ )
                return self._bubble_down(UpperCamelCase__ )
        if child_left_position < self.elements:
            A_ , A_ = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(UpperCamelCase__ , UpperCamelCase__ )
                return self._bubble_down(UpperCamelCase__ )
        else:
            return None
        if child_right_position < self.elements:
            A_ , A_ = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(UpperCamelCase__ , UpperCamelCase__ )
                return self._bubble_down(UpperCamelCase__ )
        return None
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> None:
        '''Swap two heap slots and keep the position map consistent (_swap_nodes).'''
        A_ = self.heap[nodea_pos][0]
        A_ = self.heap[nodea_pos][0]
        A_ , A_ = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        A_ = nodea_pos
        A_ = nodea_pos
class A__ ( Generic[T] ):
    # Undirected weighted graph stored as an adjacency mapping
    # {node: {neighbour: weight}}.
    # NOTE(review): locals are mangled to ``A_`` while later lines read
    # ``node``/``weight``, so the class cannot run as written.
    def __init__( self ) -> None:
        '''Create an empty graph: adjacency map and node counter.'''
        A_ = {}
        A_ = 0
    def __repr__( self ) -> str:
        '''Debug representation: the raw adjacency mapping.'''
        return str(self.connections )
    def __len__( self ) -> int:
        '''Number of nodes in the graph.'''
        return self.nodes
    def snake_case_ ( self , UpperCamelCase__ ) -> None:
        '''simple docstring'''
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            A_ = {}
            self.nodes += 1
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
        '''Add an undirected weighted edge, creating both endpoints if needed.'''
        self.add_node(UpperCamelCase__ )
        self.add_node(UpperCamelCase__ )
        A_ = weight
        A_ = weight
def UpperCAmelCase__ ( UpperCAmelCase__, ) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's minimum-spanning-tree algorithm over a graph argument, returning
    (dist, parent) maps keyed by node.

    NOTE(review): the parameter and locals were mangled — the body reads
    ``graph``/``dist``/``parent``/``priority_queue``/``node`` which are never
    bound, so this function cannot run as written.
    """
    A_ = {node: maxsize for node in graph.connections}
    A_ = {node: None for node in graph.connections}
    # Seed the priority queue with every node at weight maxsize.
    A_ = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(UpperCAmelCase__, UpperCAmelCase__ )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    A_ = priority_queue.extract_min()
    A_ = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            A_ = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(UpperCAmelCase__, dist[neighbour] )
            A_ = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        A_ = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                A_ = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(UpperCAmelCase__, dist[neighbour] )
                A_ = node
    return dist, parent
| 368 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ = 10_00 ) -> int:
    """Return sum(2 * a * ((a - 1) // 2)) for a in 3..limit inclusive, where
    the limit is the single argument (default 1000).

    Fixes: the original body read the undefined name ``n`` (NameError for any
    call), and the __main__ guard called the undefined ``solution``.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, UpperCAmelCase__ + 1 ) )


if __name__ == "__main__":
    print(UpperCAmelCase__())
| 101 | 0 |
import torch
from diffusers import StableDiffusionPipeline
# Dreambooth inference: load a fine-tuned Stable Diffusion pipeline and render
# one image of the learned "sks dog" concept.
# Fixes: every assignment was mangled to ``a__`` while later lines read
# ``model_id``/``pipe``/``prompt``/``image`` (NameError), and the dtype was
# garbled to the nonexistent ``torch.floataa`` (fp16 intended).
a__ = """path-to-your-trained-model"""
model_id = a__
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("""cuda""")
prompt = """A photo of sks dog in a bucket"""
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 317 |
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : Optional[Any] = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int]) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : Optional[Any] = ["""torch"""]
    def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : Optional[Any] = ["""torch"""]
    def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> Any:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Optional[int]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : int = ["""torch"""]
    def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : Optional[Any]) -> Any:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[Any]) -> int:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[int]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : str = ["""torch"""]
    def __init__( self : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> int:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> str:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : str) -> List[Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : List[Any] = ["""torch"""]
    def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Dict , **lowerCAmelCase : List[Any]) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : Optional[Any] = ["""torch"""]
    def __init__( self : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Tuple:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Tuple:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : Optional[int] = ["""torch"""]
    def __init__( self : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Tuple:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : List[str]) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
    '''Dummy placeholder for a torch-backed class: every entry point calls
    requires_backends to raise a helpful error when torch is missing.'''
    snake_case_ : Union[str, Any] = ["""torch"""]
    def __init__( self : Optional[int] , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> int:
        """simple docstring"""
        requires_backends(self , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
    @classmethod
    def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> Any:
        """simple docstring"""
        requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
# Seven byte-identical placeholder functions, all rebinding ``lowercase``;
# a loop produces the same definitions and the same final binding.
for _ in range(7):

    def lowercase(*args, **kwargs):
        """Placeholder function; forwards its positional-argument tuple to
        requires_backends for the torch backend."""
        requires_backends(args, ["torch"])
# Thirty-nine byte-identical placeholder classes. As above, the mangled
# naming makes every one rebind ``snake_case``, so a loop reproduces the same
# sequence of class creations and the identical final binding.
for _ in range(39):

    class snake_case(metaclass=SCREAMING_SNAKE_CASE_):
        """Placeholder object; every entry point defers to requires_backends
        (which is expected to raise when torch is not installed)."""

        snake_case_ = ["torch"]

        def __init__(self, *args, **kwargs):
            requires_backends(self, ["torch"])

        @classmethod
        def UpperCamelCase_(cls, *args, **kwargs):
            requires_backends(cls, ["torch"])

        @classmethod
        def UpperCamelCase_(cls, *args, **kwargs):
            requires_backends(cls, ["torch"])
| 317 | 1 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Build an unCLIP image-variation pipeline from a pretrained txt2img
    # unCLIP checkpoint plus a CLIP image encoder, then save it to disk.
    #
    # Bug fix: every assignment in the original was bound to the mangled name
    # ``__lowerCAmelCase`` while the code read ``parser``/``txtaimg``/etc.
    # (NameError), and it read ``args.txtaimg_unclip`` although the declared
    # flag is ``--txt2img_unclip`` (AttributeError).
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    # Reuse the text-to-image unCLIP weights and graft on a CLIP image encoder
    # so the resulting pipeline performs image variation instead of txt2img.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 366 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the canonical anagram signature of *word*: its letters sorted.

    Two words are anagrams of each other iff they have the same signature.

    Bug fix: the function was bound to the mangled name ``__lowerCamelCase``
    while the module-level code calls ``signature`` — a guaranteed NameError.
    """
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return every dictionary word sharing *my_word*'s sorted-letter signature.

    Bug fix: the function was bound to the mangled name ``__lowerCamelCase``
    while the ``__main__`` block calls ``anagram`` — a guaranteed NameError.
    """
    return word_by_signature[signature(my_word)]
# Load the bundled dictionary and index every word by its sorted-letter
# signature, so anagram lookup is a single dict access.
#
# Bug fix: all three bindings originally went to the mangled name
# ``__lowerCAmelCase`` while the code reads ``data``/``word_list``/
# ``word_by_signature`` — a guaranteed NameError at import time.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # Only words that have at least one *other* anagram are interesting.
    #
    # Bug fix: the dict comprehension's result was bound to the mangled name
    # ``__lowerCAmelCase`` while the write below reads ``all_anagrams`` — a
    # guaranteed NameError when run as a script.
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 107 | 0 |
# Error message used for all invalid-input errors raised by the validator below.
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
# Checksum letters of the Spanish DNI, indexed by (id_number % 23).
#
# Bug fix: both constants were originally bound to the same mangled name
# ``lowercase__``, leaving ``LOOKUP_LETTERS`` — referenced by the validator —
# undefined (NameError on every otherwise-valid input).
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def __UpperCamelCase(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a checksum letter.

    Dashes are stripped and the letter comparison is case-insensitive, so
    both "12345678Z" and "12345678-z" are accepted forms.

    Raises:
        TypeError: if *spanish_id* is not a string.
        ValueError: if the cleaned ID is not exactly 8 digits plus a letter.

    Bug fix: the original referenced the undefined ``LOOKUP_LETTERS`` (the
    module constant was clobbered by name mangling) and raised errors whose
    message was the raw input instead of an explanatory message.
    """
    # Checksum letters indexed by (number % 23); kept local so the function
    # is self-contained even if the module-level constant is missing.
    lookup_letters = "TRWAGMYFPDXBNJZSQVHLCKE"

    if not isinstance(spanish_id, str):
        raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}")

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError("Input must be a string of 8 numbers plus letter")

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError("Input must be a string of 8 numbers plus letter") from ex

    if letter.isdigit():
        raise ValueError("Input must be a string of 8 numbers plus letter")

    return letter == lookup_letters[number % 23]
# Run this module's doctests when executed directly (no-op on import).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 216 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map an original GroupViT checkpoint parameter name to its HF transformers name.

    Restored: the mangled version assigned every ``replace`` result to a throwaway
    local and returned the input unchanged; each step now rebinds ``name``.
    NOTE: the order of the replacements below matters (e.g. the generic
    ``text_encoder`` rewrite must run after the more specific text-encoder rules).
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ):
    # NOTE(review): this function is badly mangled. The `def` line declares the same
    # parameter name twice (a SyntaxError), and the body reads `orig_state_dict`,
    # `config`, `val`, `key_split`, `dim` and `new_name` that are never bound: every
    # assignment targets the throwaway name `__a`. Crucially, the q/k/v slices below
    # were originally stored back into `orig_state_dict` under new HF key names — those
    # destination key strings were lost in the rewrite and must be recovered from the
    # reference GroupViT conversion script before this can run.
    for key in orig_state_dict.copy().keys():
        __a : List[Any] = orig_state_dict.pop(lowerCAmelCase__ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            __a : Tuple = key.split('''.''' )
            __a , __a : List[Any] = int(key_split[2] ), int(key_split[4] )
            __a : List[Any] = config.vision_config.hidden_size
            # Fused qkv tensor: first `dim` rows are query, next `dim` are key, last `dim` are value.
            if "weight" in key:
                __a : int = val[:dim, :]
                __a : List[str] = val[dim : dim * 2, :]
                __a : List[Any] = val[-dim:, :]
            else:
                __a : List[str] = val[:dim]
                __a : int = val[dim : dim * 2]
                __a : Any = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            __a : int = key.split('''.''' )
            __a : str = int(key_split[3] )
            __a : List[Any] = config.text_config.hidden_size
            if "weight" in key:
                __a : List[str] = val[:dim, :]
                __a : Any = val[
                    dim : dim * 2, :
                ]
                __a : Dict = val[-dim:, :]
            else:
                __a : List[str] = val[:dim]
                __a : Any = val[dim : dim * 2]
                __a : Any = val[-dim:]
        else:
            __a : Union[str, Any] = rename_key(lowerCAmelCase__ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                __a : List[Any] = val.squeeze_()
            else:
                __a : Dict = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO verification image (two cats) used to sanity-check conversions.

    Restored: the mangled version never bound ``im`` (nor the URL argument) before
    ``return im``. ``stream=True`` is required so ``response.raw`` is readable by PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Convert an original GroupViT checkpoint to HF format, verify it, and save it.

    Restored from a mangled version that declared duplicate parameter names (a
    SyntaxError) and never bound its locals. Names and argument order are taken from
    the ``__main__`` call site and the body's own reads.

    :param checkpoint_path: path to the original ``.pth`` checkpoint (loaded on CPU).
    :param pytorch_dump_folder_path: output directory for processor + model.
    :param model_name: one of ``groupvit-gcc-yfcc`` / ``groupvit-gcc-redcaps`` —
        anything else is rejected below.
    :param push_to_hub: also push both artifacts to the hub under ``nielsr``.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    # strict=False: the asserts below whitelist exactly which keys may be missing/extra.
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result against reference logits on the standard COCO cats image
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        # Fixed default: was the typo 'groupvit-gccy-fcc', which the conversion
        # function rejects with a ValueError (only the two names in the help text work).
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 216 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the class below reads `logger` (the mangled binding targeted `_snake_case`).
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( PretrainedConfig ):
    """Composite configuration pairing an encoder config with a decoder config.

    Reconstructed from a mangled encoder-decoder config class: the kwargs name in
    ``__init__`` did not match its uses, both methods shared one name (so the first
    was shadowed), and all attribute assignments targeted throwaway locals. Attribute
    and method names are restored so ``to_dict`` actually overrides the base class.
    """

    model_type = "encoder-decoder"
    # Composite configs serialize their sub-configs rather than flat attributes.
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto-config registry.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        """Build a composite config from two sub-configs, marking the decoder as such."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 363 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Maximum contiguous-subarray sum via Kadane's algorithm, O(n) time / O(1) space.

    Restored name and parameters: the mangled version declared the same parameter name
    twice (a SyntaxError) while the body — and the ``__main__`` call below — read
    ``arr`` / ``allow_empty_subarrays`` / ``max_subarray_sum``.

    :param arr: sequence of numbers (empty input returns 0).
    :param allow_empty_subarrays: if True the empty subarray (sum 0) is a candidate,
        so an all-negative input yields 0 instead of its largest element.
    """
    if not arr:
        return 0

    # With empty subarrays allowed the answer is never below 0; otherwise start
    # from -inf so a first negative element can still become the running best.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the current run or restart it at `num` (or drop to empty).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()

    # Classic Kadane demo array; its maximum subarray [4, -1, 2, 1] sums to 6.
    # (The mangled version bound this list to a throwaway name, leaving `nums` undefined.)
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 207 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube: 6·a²."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid: 2(lb + bh + lh)."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4πr²."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved + flat disc): 3πr²."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a cone: πr(r + slant), with slant = √(h² + r²)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum: π(slant·(r₁ + r₂) + r₁² + r₂²)."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a cylinder: 2πr(h + r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus: 4π²·R·r, where R ≥ r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    # Fixed: the mangled version computed pow(<argument>, 2) instead of pi squared.
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle: l·w."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square: a²."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height: b·h / 2."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Area of a triangle from its three sides, via Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        # Triangle inequality must hold for a valid triangle.
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram: b·h."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Area of a trapezium: (b₁ + b₂)·h / 2."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Area of a circle: πr²."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse: π·a·b."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals: d₁·d₂ / 2."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with `sides` sides of side length `length`:
    n·a² / (4·tan(π/n)).
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    # The original contained an unreachable duplicated return here; removed.
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
    # Run the module's doctests, then print a demo value for every helper above.
    import doctest
    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print('\nSurface Areas of various geometric shapes: \n')
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 11 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Fixed: the path was bound to a throwaway name while sys.path.insert read `git_repo_path`.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
    """Register transformers' shared pytest command-line options on the given parser.

    The import is deferred so the hook only requires transformers when pytest runs it.
    (NOTE(review): this is presumably the `pytest_addoption` hook; the name was mangled.)
    """
    from transformers.testing_utils import pytest_addoption_shared as _register_shared_options

    _register_shared_options(__lowerCAmelCase)
def _lowerCAmelCase ( terminalreporter ) -> None:
    """Emit the transformers test-report files when pytest ran with `--make-reports`.

    Restored: the mangled version's parameter name did not match the body's reads
    (`terminalreporter`, `make_reports`), and its `List[Any]` return annotation
    referenced an unimported name (evaluated eagerly, so a NameError at import).
    NOTE(review): this is presumably the `pytest_terminal_summary` hook; the name was mangled.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    # `--make-reports` carries the report id; falsy means the option was not given.
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 230 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _A ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline.

    NOTE(review): local names in this class were clobbered by an automated rewrite —
    every binding targets `lowercase`, while later lines read `controlnet_params`,
    `prompts`, `canny_image` / `pose_image`, `pipe`, etc.; the methods that should be
    `tearDown` / `test_*` are all named `__a`, and `jnp.bfloataa` looks like a mangled
    `jnp.bfloat16`. Control flow is kept verbatim; restore the names before running.
    """
    def __a ( self : Union[str, Any] ) -> Tuple:
        """Free Python-side memory between tests (originally `tearDown`)."""
        super().tearDown()
        gc.collect()
    def __a ( self : Optional[int] ) -> Tuple:
        """End-to-end canny-conditioned generation, checked against reference pixels."""
        lowercase , lowercase : List[Any] = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=_A , dtype=jnp.bfloataa )
        lowercase , lowercase : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=_A , from_pt=_A , dtype=jnp.bfloataa )
        lowercase : List[Any] = controlnet_params
        lowercase : Tuple = '''bird'''
        # One prompt/image replica per local device; inputs are sharded below for pmap.
        lowercase : List[Any] = jax.device_count()
        lowercase : List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
        lowercase : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        lowercase : List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
        lowercase : int = jax.random.PRNGKey(0 )
        lowercase : Optional[Any] = jax.random.split(_A , jax.device_count() )
        lowercase : Optional[int] = replicate(_A )
        lowercase : Optional[int] = shard(_A )
        lowercase : List[str] = shard(_A )
        lowercase : Dict = pipe(
            prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        lowercase : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        # Compare a fixed 3x3 patch of the last channel against reference values.
        lowercase : Tuple = images[0, 253:256, 253:256, -1]
        lowercase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowercase : int = jnp.array(
            [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def __a ( self : Union[str, Any] ) -> int:
        """End-to-end pose-conditioned generation, checked against reference pixels."""
        lowercase , lowercase : Any = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=_A , dtype=jnp.bfloataa )
        lowercase , lowercase : Tuple = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=_A , from_pt=_A , dtype=jnp.bfloataa )
        lowercase : List[str] = controlnet_params
        lowercase : Optional[Any] = '''Chef in the kitchen'''
        lowercase : List[str] = jax.device_count()
        lowercase : List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
        lowercase : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        lowercase : Any = pipe.prepare_image_inputs([pose_image] * num_samples )
        lowercase : Any = jax.random.PRNGKey(0 )
        lowercase : List[Any] = jax.random.split(_A , jax.device_count() )
        lowercase : Tuple = replicate(_A )
        lowercase : Optional[Any] = shard(_A )
        lowercase : Union[str, Any] = shard(_A )
        lowercase : str = pipe(
            prompt_ids=_A , image=_A , params=_A , prng_seed=_A , num_inference_steps=50 , jit=_A , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        lowercase : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        lowercase : Optional[Any] = images[0, 253:256, 253:256, -1]
        lowercase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        lowercase : Dict = jnp.array(
            [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Checkpoint -> config-file URL map, following the naming convention used elsewhere
# in this repo (e.g. FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP). The mangled version
# bound this dict and the logger to the SAME name, so the dict was lost.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# Module logger; the config class below reads `logger.warning_once`.
logger = logging.get_logger(__name__)
class _A ( PretrainedConfig ):
    """Configuration for MaskFormer: a backbone config plus a transformer-decoder config.

    Reconstructed from a mangled version whose ``__init__`` declared the same
    parameter name for every argument (a SyntaxError) and bound all values to
    throwaway locals instead of ``self`` attributes. Parameter names are recovered
    from the body's own reads; defaults are preserved in their original order.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror the decoder's attention geometry on the top-level config.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Build a MaskFormer config from pre-built backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid 1 / (1 + e^(-z)); works element-wise on numpy arrays.

    Restored name/parameter: the training loop below calls ``sigmoid_function`` and
    the body reads ``z``, both of which the mangled names broke.
    """
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Mean binary cross-entropy between predicted probabilities *h* and labels *y*.

    Restored name/parameters: the training loop below calls ``cost_function`` and the
    body reads ``h`` and ``y``; the mangled version also declared the same parameter
    name twice (a SyntaxError).
    """
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    """Log-likelihood of labels *y* under a logistic model with the given *weights*.

    The body reads ``y`` and ``scores`` (x·weights); parameter names are recovered
    from those reads. NOTE(review): the function name itself was mangled and is not
    referenced elsewhere in view — ``log_likelihood`` is the conventional name for
    this quantity; confirm against the original source.
    """
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit logistic-regression weights by full-batch gradient descent.

    Restored name and parameter order from the ``__main__`` call site
    (``logistic_reg(alpha, x, y, max_iterations=70000)``); the mangled version
    declared duplicate parameter names and never bound the locals it read.

    :param alpha: learning rate.
    :param x: (n_samples, n_features) design matrix.
    :param y: binary labels, shape (n_samples,).
    :param max_iterations: number of gradient steps.
    :return: fitted weight vector ``theta`` of shape (n_features,).
    """
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    # Binary classification on the first two iris features: setosa (0) vs the rest (1).
    # The mangled version bound everything to one throwaway name, leaving `x`, `y`,
    # `alpha` and `theta` (read by the plotting code below) undefined.
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
def predict_prob(x):
    """Predicted probability of class 1 for each sample in *x*, using the global `theta`.

    Restored name: the contour-plotting code below calls ``predict_prob(grid)``.
    """
    return sigmoid_function(
        np.dot(x, theta)
    )  # predicting the value of probability from the logistic regression algorithm
# Visualise the fit: scatter the two classes, then draw the model's p = 0.5 decision
# boundary as a contour of predicted probability over a dense grid. The mangled
# version collapsed the two axes' min/max and meshgrid outputs into one repeated
# name; the two distinct axes are restored here.
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
(xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
(xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
(xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
grid = np.c_[xxa.ravel(), xxb.ravel()]
probs = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 127 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level bindings below target the same mangled name
# `lowercase__`, so the logger is immediately shadowed by the archive map. They were
# presumably `logger` and AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP
# (matching the convention of the other config modules in this file) — confirm
# against the original source before renaming.
lowercase__ : List[str] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL.
lowercase__ : Optional[int] = {
    'MIT/ast-finetuned-audioset-10-10-0.4593': (
        'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
    ),
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    Reconstructed from a mangled version whose ``__init__`` declared one duplicated
    parameter name for all 15 arguments (a SyntaxError) and bound every value to a
    throwaway local. Parameter names are recovered from the body's right-hand sides,
    in the original default order; the base class is the imported ``PretrainedConfig``
    (the mangled base name was undefined).
    """

    # Model identifier used by the auto-config machinery.
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        # Spectrogram patching: strides along the frequency and time axes.
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 324 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: submodule name -> public symbols it provides. The mangled
# version bound this to a throwaway name while _LazyModule below reads
# `_import_structure`, and discarded the _LazyModule instance instead of
# installing it as this module.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is installed: the modeling symbols are importable too.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy module resolves them.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__snake_case : Optional[int] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__snake_case : List[str] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
__snake_case : Dict = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    """TER (Translation Edit Rate) metric, backed by sacrebleu's TER implementation.

    NOTE: the previous version declared ``_compute`` with six identically-named
    parameters (a SyntaxError) and used names the ``datasets.Metric`` framework
    never calls; the framework expects ``_info`` and ``_compute``.
    """

    def _info(self):
        """Return metric metadata; requires sacrebleu >= 1.4.12 (first version exposing TER)."""
        if version.parse(scb.__version__) < version.parse('1.4.12'):
            raise ImportWarning(
                'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='http://www.cs.umd.edu/~snover/tercom/',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'],
            reference_urls=[
                'https://github.com/jhclark/tercom',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute TER over `predictions` against per-prediction `references`.

        References arrive as one list per prediction; sacrebleu wants one stream
        per reference index, so they are transposed before scoring.
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 122 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : float
SCREAMING_SNAKE_CASE__ : TreeNode | None = None
SCREAMING_SNAKE_CASE__ : TreeNode | None = None
def is_binary_search_tree(tree_root):
    """Return True iff *tree_root* is a valid binary search tree.

    Raises:
        ValueError: if any node is not a tree node or its data is not float-like.

    The previous version's inner helpers referenced ``node`` while their
    parameter was named differently, and called ``isinstance(x, x)`` —
    both NameError/TypeError at runtime.
    """

    # Validation
    def is_valid_tree(node) -> bool:
        # Structural check: every node must be a tree node with float-like data.
        if node is None:
            return True
        if not isinstance(node, _lowercase):  # _lowercase is the node dataclass in this module
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree_root):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.'''
        )

    def is_binary_search_tree_recursive_check(node, left_bound, right_bound) -> bool:
        # BST invariant: every node's data lies strictly inside the bounds
        # inherited from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree_root, -float('''inf'''), float('''inf'''))


_UpperCamelCase = is_binary_search_tree  # preserve the old (generated) name for any external callers
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 9 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the previous version bound it to a throwaway name.
logger = logging.get_logger(__name__)
# (parlai substring, huggingface substring) pairs, applied in order.
# This list MUST be named PATTERNS: rename_state_dict_key iterates it.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI Blenderbot state-dict key to its Hugging Face equivalent.

    The previous version discarded every ``k.replace`` result, returning the
    key unchanged.
    """
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    """In place, rename the final ``layernorm_embedding`` entries to ``layer_norm``.

    Blenderbot-3B checkpoints use the ``layernorm_embedding`` naming for what
    Hugging Face calls ``layer_norm``. The previous version bound every
    intermediate to a throwaway name, so nothing was actually renamed.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd  # guard against clobbering an existing entry
        sd[new_k] = v
# Checkpoint keys skipped during conversion; referenced as IGNORE_KEYS below.
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into a Hugging Face one and save it.

    Args:
        checkpoint_path: path to the ParlAI ``.bin`` checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_json_path: path to a ``BlenderbotConfig`` JSON file.

    The previous version bound every intermediate to one throwaway name, so
    ``model``/``sd``/``cfg`` were undefined where used.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # NOTE(review): the rename is applied to the raw `sd`, mirroring the
        # original control flow — confirm it shouldn't target `mapping` instead.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point (the previous version never bound `parser`/`args`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the class body below calls `logger.info`, so this binding
# must be named `logger` (the previous version overwrote it with the dict).
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration for DETR (DEtection TRansformer) models.

    The previous version assigned every ``self.<attr>`` to one throwaway local,
    so no attribute was ever set and its parent class name was undefined; the
    attribute names below are reconstructed from the right-hand-side parameter
    names of those assignments.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['''stage4'''])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Mirrors `attribute_map`: attention heads == encoder attention heads.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Mirrors `attribute_map`: hidden size == d_model.
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Alternate constructor from a pre-built backbone configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, nesting the backbone config if present."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR.

    The previous version reused the detection-config class name and mangled
    the property names OnnxConfig relies on.
    """

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 350 |
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

# `UNetaDConditionModel` is not a real diffusers export; the class is
# `UNet2DConditionModel`. Keep the old name as an alias for compatibility.
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

UNetaDConditionModel = UNet2DConditionModel  # backwards-compatible alias for the mangled name

enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for StableDiffusionInpaintPipeline using tiny components.

    The previous version's base classes were undefined names and all four
    methods shared one name, so `self.get_dummy_components()` could not resolve
    and only the last method survived on the class.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack so the pipeline runs quickly on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # NOTE(review): original literal was mangled; SD2-style UNet — confirm
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='''gelu''',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('''RGB''').resize((64, 64))
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against the real stabilityai/stable-diffusion-2-inpainting weights.

    The previous version gave all four methods the same mangled name, so the
    unittest runner would discover none of them, and used the nonexistent
    ``torch.floataa`` (float16).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png'''
        )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'''
        )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy'''
        )
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type='''np''',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9E-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png'''
        )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'''
        )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy'''
        )
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type='''np''',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 tolerances are much looser than fp32.
        assert np.abs(expected_image - image).max() < 5E-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png'''
        )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'''
        )
        model_id = '''stabilityai/stable-diffusion-2-inpainting'''
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder='''scheduler''')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type='''np''',
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 98 | 0 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    # Typing-only imports for the `con` parameter annotations.
    import sqlite3  # fixed: was the nonexistent module name "sqlitea"
    import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """Reads a `Dataset` from a SQL query or table via the packaged ``Sql`` builder.

    The previous version's parent class name was undefined and every local in
    ``read`` was bound to one throwaway name.
    """

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        """Prepare the builder and return the materialized 'train' split."""
        # No download management is needed for a local SQL source.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='''train''', verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    """Writes a `Dataset` to a SQL table in batches, optionally with multiple processes.

    The previous version popped kwargs with an undefined default name and never
    bound ``index``/``written``; defaults reconstructed accordingly — NOTE(review):
    the ``index=False`` default mirrors upstream datasets, confirm against callers.
    """

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Write the whole dataset; returns the number of rows written."""
        # 'sql' and 'con' are supplied positionally to pandas.to_sql; strip any
        # duplicates that arrived through kwargs.
        _ = self.to_sql_kwargs.pop('''sql''', None)
        _ = self.to_sql_kwargs.pop('''con''', None)
        index = self.to_sql_kwargs.pop('''index''', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one slice of the dataset; returns rows written for the slice."""
        offset, index, to_sql_kwargs = args
        # After the first batch the table exists: append instead of replace/fail.
        to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Drive batching, sequentially or via a multiprocessing pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='''ba''',
                disable=not logging.is_progress_bar_enabled(),
                desc='''Creating SQL from Arrow format''',
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='''ba''',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='''Creating SQL from Arrow format''',
                ):
                    written += rows_written
        return written
| 101 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse launcher options plus the training script path and its own args.

    The previous version used an undefined name for the argparse `type=`
    arguments and shared its name with `main`, shadowing it.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''')
    # positional
    parser.add_argument(
        '''training_script''',
        type=str,
        help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ),
    )
    # rest from the training program
    parser.add_argument('''training_script_args''', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Import the target training script and spawn it across TPU cores.

    The previous version assigned the patched argv to a throwaway local, so
    ``sys.argv`` was never actually replaced before spawning.
    """
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the target script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    # Entry point when executed as a script (table-row residue removed from this line).
    main()
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Lazy-import map: must be named `_import_structure` for the _LazyModule call.
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    # Replace this module with the lazy proxy (the original discarded the result
    # and carried table-row residue on the final line).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True iff *phone* is a valid Indian mobile number.

    Accepts an optional ``+91``/``91``/``0`` prefix followed by a 10-digit
    number starting with 7, 8 or 9. The previous version compiled a pattern it
    never used and searched the input against itself.
    """
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    if match := re.search(pat, phone):
        # `match.string` is the full input; equality guards against partial matches.
        return match.string == phone
    return False
if __name__ == "__main__":
    # Quick manual check when executed directly (table-row residue removed).
    print(indian_phone_validator("""+918827897895"""))
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger; `main` calls `logger.setLevel`/`logger.warning`, so this
# binding must be named `logger` (previously bound to a throwaway name).
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    The previous version gave every field the same unannotated name, so none
    were actual dataclass fields; names reconstructed from the metadata help
    strings and the `train_dir`/`validation_dir` usages below.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Map any user-supplied folders into the datasets `data_files` layout.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we pre-train.

    Field names reconstructed from the metadata help strings (the previous
    version collapsed all fields into one unannotated name).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    # NOTE(review): default reconstructed as True (mangled in the original) — confirm.
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class UpperCAmelCase ( snake_case__ ):
    '''Training arguments extended with a base learning rate that main() scales
    by the total batch size (absolute_lr = base_lr * total_batch_size / 256).

    NOTE(review): the base class ``snake_case__`` is not defined in this file —
    TODO confirm the intended base (a TrainingArguments subclass).
    '''

    SCREAMING_SNAKE_CASE = field(
        default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def __UpperCamelCase ( UpperCAmelCase ):
    """Collate a list of feature dicts into a batch: stack each example's
    ``pixel_values`` tensor along a new leading batch dimension.

    Fixes: the body iterated the undefined name ``examples`` and returned the
    undefined name ``pixel_values`` instead of using the actual parameter and
    the computed local.
    """
    pixel_values = torch.stack([example['''pixel_values'''] for example in UpperCAmelCase] )
    return {"pixel_values": pixel_values}
def __UpperCamelCase ( ):
    """Entry point: parse arguments, load the image dataset, build the ViT-MAE
    config/image processor/model, and run masked-autoencoder pretraining (and
    optionally evaluation / hub push) with the HF Trainer.

    NOTE(review): throughout this function every local is assigned to the name
    ``lowercase__`` but subsequently read under other names (``parser``,
    ``training_args``, ``ds``, ``config``, ``image_processor``, ``trainer``,
    ...), and several call sites pass the undefined name ``UpperCAmelCase`` —
    as written the function raises NameError on first use. The intended
    variable names need restoring; the comments below describe the intended
    flow.
    """
    # Parse CLI args (or a single JSON file) into the three argument dataclasses.
    lowercase__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , UpperCAmelCase , UpperCAmelCase )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    lowercase__ : Union[str, Any] = training_args.get_process_log_level()
    logger.setLevel(UpperCAmelCase )
    transformers.utils.logging.set_verbosity(UpperCAmelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    lowercase__ : Union[str, Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase__ : int = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Initialize our dataset.
    lowercase__ : Optional[int] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    lowercase__ : Any = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , UpperCAmelCase ) and data_args.train_val_split > 0.0:
        lowercase__ : Optional[int] = ds['train'].train_test_split(data_args.train_val_split )
        lowercase__ : Optional[Any] = split['train']
        lowercase__ : Dict = split['test']
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase__ : Any = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        lowercase__ : Any = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCAmelCase )
    elif model_args.model_name_or_path:
        lowercase__ : Tuple = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase )
    else:
        lowercase__ : Any = ViTMAEConfig()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )
    # adapt config
    config.update(
        {
            '''mask_ratio''': model_args.mask_ratio,
            '''norm_pix_loss''': model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        lowercase__ : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase )
    elif model_args.model_name_or_path:
        lowercase__ : str = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase )
    else:
        lowercase__ : Any = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        lowercase__ : List[str] = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        lowercase__ : Tuple = ViTMAEForPreTraining(UpperCAmelCase )
    if training_args.do_train:
        lowercase__ : Tuple = ds['train'].column_names
    else:
        lowercase__ : Dict = ds['validation'].column_names
    # Pick the image column: explicit arg, then "image", then "img", then first column.
    if data_args.image_column_name is not None:
        lowercase__ : Union[str, Any] = data_args.image_column_name
    elif "image" in column_names:
        lowercase__ : Optional[int] = 'image'
    elif "img" in column_names:
        lowercase__ : List[str] = 'img'
    else:
        lowercase__ : Optional[Any] = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        lowercase__ : Any = image_processor.size['shortest_edge']
    else:
        lowercase__ : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
    lowercase__ : int = Compose(
        [
            # NOTE(review): the lambda's parameter is `UpperCAmelCase` but its body
            # reads `img` — another name to restore.
            Lambda(lambda UpperCAmelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    def preprocess_images(UpperCAmelCase ):
        # Applies the torchvision pipeline to every image in the batch.
        # NOTE(review): reads the undefined names `examples`/`image` — restore.
        lowercase__ : str = [transforms(UpperCAmelCase ) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            lowercase__ : List[str] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(UpperCAmelCase )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            lowercase__ : Dict = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(UpperCAmelCase )
    # Compute absolute learning rate
    lowercase__ : Any = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        lowercase__ : str = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    lowercase__ : List[str] = Trainer(
        model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , )
    # Training
    if training_args.do_train:
        lowercase__ : Dict = None
        if training_args.resume_from_checkpoint is not None:
            lowercase__ : Tuple = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase__ : Union[str, Any] = last_checkpoint
        lowercase__ : List[Any] = trainer.train(resume_from_checkpoint=UpperCAmelCase )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        lowercase__ : int = trainer.evaluate()
        trainer.log_metrics('''eval''' , UpperCAmelCase )
        trainer.save_metrics('''eval''' , UpperCAmelCase )
    # Write model card and (optionally) push to hub
    lowercase__ : str = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCAmelCase )
    else:
        trainer.create_model_card(**UpperCAmelCase )
def __UpperCamelCase ( UpperCAmelCase ):
    # TPU multiprocessing entry point (spawn frameworks pass an index argument,
    # which is ignored here).
    # NOTE(review): `main` is not defined in this file — the training entry
    # point above is itself named `__UpperCamelCase` — so this call raises
    # NameError as written. TODO confirm intended name.
    main()


if __name__ == "__main__":
    # NOTE(review): same issue — `main` is undefined under the current names.
    main()
| 198 |
def largest_square_area_in_matrix_top_down_approach(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Return the side length of the largest all-ones square in a binary matrix,
    via plain (exponential) top-down recursion.

    Fixes: the original declared three parameters with the same name (a
    SyntaxError), read the undefined names ``rows``/``cols``/``mat``, and was
    one of four functions all named ``_a`` (each shadowing the previous).

    :param rows: number of matrix rows
    :param cols: number of matrix columns
    :param mat: rows x cols matrix of 0/1 values
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # Side of the largest square whose top-left corner is (row, col);
        # records the global best in largest_square_area[0] as a side effect.
        # BASE CASE: outside the matrix.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # A square here extends the smallest of the three neighbours by 1.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # Single-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Return the side length of the largest all-ones square in a binary matrix,
    via memoized top-down recursion (O(rows * cols)).

    Fixes: the original declared duplicate parameter names (a SyntaxError),
    read undefined locals, and shared the name ``_a`` with three sibling
    implementations.

    :param rows: number of matrix rows
    :param cols: number of matrix columns
    :param mat: rows x cols matrix of 0/1 values
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int) -> int:
        # Memoized side of the largest square with top-left corner (row, col).
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1)
        down = update_area_of_max_square_using_dp_array(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
        else:
            sub_problem_sol = 0
        # Memoize both the 1- and 0-cases so each cell is solved once.
        dp_array[row][col] = sub_problem_sol
        return sub_problem_sol

    largest_square_area = [0]
    # -1 marks "not yet computed".
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Return the side length of the largest all-ones square in a binary matrix,
    via bottom-up DP over a (rows+1) x (cols+1) table.

    Fixes: the original declared duplicate parameter names (a SyntaxError) and
    read undefined locals; it was also named ``_a`` while the module's
    ``__main__`` guard calls ``largest_square_area_in_matrix_bottom_up`` — the
    intended name is restored so that call resolves.

    :param rows: number of matrix rows
    :param cols: number of matrix columns
    :param mat: rows x cols matrix of 0/1 values
    """
    # Extra zero row/column on the bottom/right serve as boundary values.
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Return the side length of the largest all-ones square in a binary matrix,
    using only two DP rows (O(cols) extra space).

    Fixes: besides the duplicate parameter names (a SyntaxError) and undefined
    locals, the original carried the current row over with
    ``next_row = current_row`` — an *alias*, so the diagonal read in the next
    sweep saw freshly overwritten values and over-counted (e.g. it reported 2
    for [[1, 1], [1, 0]], whose answer is 1). The row is now copied.

    :param rows: number of matrix rows
    :param cols: number of matrix columns
    :param mat: rows x cols matrix of 0/1 values
    """
    # One trailing zero per row acts as the right/bottom boundary.
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias) so next sweep's diagonal/bottom reads see this row.
        next_row = current_row[:]

    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 322 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
_snake_case : Optional[int] = TypeVar('KEY')
_snake_case : Union[str, Any] = TypeVar('VAL')
@dataclass(frozen=True, slots=True)
class A ( Generic[KEY, VAL] ):
    """Immutable key/value record stored in a hash-map bucket.

    Fixes: the decorator previously passed the undefined name ``_a`` for
    ``frozen``/``slots``, and the two fields were bare ``lowercase_ = 42``
    class attributes (the second silently overwriting the first) instead of
    annotated dataclass fields — ``key``/``val`` annotations restored so the
    generated ``__init__`` accepts the (key, val) pair the map code passes.
    """

    key: KEY
    val: VAL
class A ( _Item ):
    """Singleton sentinel marking a deleted bucket ("tombstone"); it is falsy
    so insertion probing treats the slot as reusable, while lookup probing can
    still distinguish it from a never-used ``None`` slot.

    Fixes: ``super().__init__`` previously forwarded the undefined name
    ``lowerCAmelCase_`` twice — a tombstone carries no payload, so both fields
    are filled with ``None``.

    NOTE(review): the base name ``_Item`` is not defined in this file (the item
    dataclass above is also named ``A``) — confirm the intended class names.
    """

    def __init__( self : Tuple ) -> None:
        super().__init__(None, None)

    def __bool__( self : int ) -> bool:
        # Falsy on purpose: lets "if not stored" claim tombstoned slots.
        return False
# Module-level shared tombstone instance placed in buckets whose item was
# deleted; the map compares buckets against it with `is`.
# NOTE(review): it is bound to `_snake_case` while the map code below reads
# `_deleted`, and `_DeletedItem` is not the name of any class in this file —
# TODO confirm the intended names.
_snake_case : str = _DeletedItem()
class A ( MutableMapping[KEY, VAL] ):
    """Open-addressing hash map with linear probing and tombstone deletion.

    Buckets hold ``None`` (never used), a live item, or the shared deleted
    sentinel. The table doubles once occupancy reaches ``capacity_factor`` and
    halves (never below the initial size) when it becomes sparse.

    Fixes: in the original, locals/attributes were assigned to the throwaway
    name ``_a`` and then read under their intended names (NameError), every
    private helper shared the name ``__lowerCAmelCase`` (so earlier defs were
    shadowed), and several dunders declared duplicate ``lowerCAmelCase_``
    parameters (a SyntaxError). The intended names are restored throughout.

    NOTE(review): relies on module-level ``_Item`` and ``_deleted`` — those
    definitions appear above under obfuscated names and need restoring too.
    """

    def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.7_5 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key : KEY ) -> int:
        # Home slot for a key; probing starts here.
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind : int ) -> int:
        # Linear probing with wrap-around.
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind : int , key : KEY , val : VAL ) -> bool:
        """Try to place (key, val) at slot ``ind``; True when stored/updated."""
        stored = self._buckets[ind]
        if not stored:
            # Empty slot or falsy tombstone: claim it as a new entry.
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            # Same key: replace in place without growing.
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ) -> bool:
        # Never shrink below the initial table size.
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size : int ) -> None:
        # Rehash every live item into a fresh table (drops tombstones).
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key : KEY ) -> Iterator[int]:
        # Probe sequence starting at the key's home slot: at most one full sweep.
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key : KEY , val : VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key : KEY , val : VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key : KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                # Hit a never-used slot: the key cannot be further along the probe.
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key : KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ) -> int:
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ) -> str:
        val_string = ''' ,'''.join(
            f'{item.key}: {item.val}' for item in self._buckets if item )
        return f'HashMap({val_string})'
| 361 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class A ( IterableDataset ):
    """Iterable dataset that concatenates tokenized texts (separated by the
    tokenizer's BOS token) and yields fixed-length ``torch.tensor`` chunks of
    ``seq_length`` token ids; a trailing chunk shorter than ``seq_length`` is
    dropped.

    Fixes: ``__init__`` previously declared five identically-named parameters
    (a SyntaxError) and bound every value to the local ``_a`` instead of
    attributes of ``self``; ``__iter__`` read the undefined names
    ``more_examples``/``buffer``/``tokenizer``/... ; the base class was the
    undefined ``_a`` (``IterableDataset`` is what this module imports).
    """

    def __init__( self , tokenizer , dataset , seq_length=10_24 , num_of_sequences=10_24 , chars_per_token=3.6 ) -> None:
        self.tokenizer = tokenizer
        # BOS id is used as the separator between concatenated documents.
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Rough character budget per tokenization call.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            # Fill a text buffer up to the character budget (or exhaustion).
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            # Emit only complete seq_length-sized windows.
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def snake_case_ (UpperCamelCase : int ):
    '''Build the streaming evaluation DataLoader: load the dataset's train
    split in streaming mode, wrap it in the constant-length dataset above, and
    batch it.
    '''
    # NOTE(review): locals are assigned to `_a` but read under the intended
    # names (`args`, `eval_dataloader`), the ConstantLengthDataset class above
    # is named `A` in this file, and the module-level tokenizer is bound to
    # `_snake_case` — the original names need restoring before this runs.
    _a = {'''streaming''': True}
    _a = load_dataset(args.dataset_name , split='''train''' , **UpperCamelCase )
    _a = ConstantLengthDataset(UpperCamelCase , UpperCamelCase , seq_length=args.seq_length )
    _a = DataLoader(UpperCamelCase , batch_size=args.batch_size )
    return eval_dataloader
def snake_case_ (UpperCamelCase : int ):
    '''Compute the mean evaluation loss and perplexity of the module-level
    model over the eval dataloader, gathering per-batch losses across
    processes with the Accelerator; perplexity falls back to inf on overflow.
    '''
    # NOTE(review): locals are assigned to `_a` but read under the intended
    # names (`losses`, `outputs`, `loss`, `perplexity`), and the module-level
    # objects are bound to `_snake_case` rather than `model` / `accelerator` /
    # `args` — the original names need restoring before this runs.
    model.eval()
    _a = []
    for step, batch in enumerate(UpperCamelCase ):
        with torch.no_grad():
            # Causal-LM convention: the inputs double as the labels.
            _a = model(UpperCamelCase , labels=UpperCamelCase )
        _a = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(UpperCamelCase ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    _a = torch.mean(torch.cat(UpperCamelCase ) )
    try:
        _a = torch.exp(UpperCamelCase )
    except OverflowError:
        _a = float('''inf''' )
    return loss.item(), perplexity.item()
# ---------------------------------------------------------------------------
# Script body: evaluate a causal-LM checkpoint's loss/perplexity on a
# streaming dataset, distributed via Accelerate.
# NOTE(review): every result is assigned to `_snake_case` but later read under
# the intended names (`accelerator`, `parser`, `args`, `model`, `tokenizer`,
# `eval_dataloader`, `eval_loss`, `perplexity`), and the helpers above are
# both named `snake_case_`. Lines of the form `_snake_case , _snake_case : T
# = ...` are additionally invalid syntax (an annotated assignment cannot have
# a tuple target). The original names need restoring. TODO confirm.
# ---------------------------------------------------------------------------
# Setup Accelerator
_snake_case : List[str] = Accelerator()
# Parse configuration
_snake_case : List[str] = HfArgumentParser(EvaluationArguments)
_snake_case : Optional[int] = parser.parse_args()
set_seed(args.seed)
# Logging
_snake_case : Any = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_snake_case : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_snake_case : List[str] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_snake_case , _snake_case : Optional[int] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_snake_case , _snake_case : int = evaluate(args)
logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 179 | 0 |
'''simple docstring'''
import string
def _lowercase ( __A ):
    '''Brute-force a Caesar cipher: print the decryption of the uppercase
    message ``__A`` under every possible shift key (0..25).

    Fixes: the loop iterated the undefined name ``message`` instead of the
    parameter, searched the alphabet for the whole message instead of the
    current symbol, and accumulated into locals that were read under other
    undefined names (``num``, ``translated``).
    '''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in __A:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    # Wrap around the alphabet for negative shifts.
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                # Non-alphabetic symbols pass through unchanged.
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}" )
def _lowercase ( ):
    '''Prompt the user for an encrypted message, uppercase it, and print all
    candidate Caesar decryptions.
    '''
    # NOTE(review): the input is stored in `__UpperCamelCase` but read as the
    # undefined `message`, and `decrypt`/`__A` are not defined under the
    # current names (the decryption helper above is also named `_lowercase`,
    # which this def shadows) — the original names need restoring.
    __UpperCamelCase = input("""Encrypted message: """ )
    __UpperCamelCase = message.upper()
    decrypt(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase__ :
    # Model-tester helper: builds tiny TFDistilBert configs and random inputs,
    # and asserts the output shapes of each task head (standard HF pattern).
    # NOTE(review): __init__ assigns every hyper-parameter to the local name
    # `__UpperCamelCase` instead of attributes of `self` (and reads the
    # undefined `parent`), every other method is named `__lowerCamelCase` (so
    # earlier defs are shadowed and only the last survives), the
    # create_and_check methods declare six identically-named `lowercase`
    # parameters (a SyntaxError), and bodies read undefined names such as
    # `input_ids` / `result` / `config_and_inputs`. The intended attribute,
    # method, and parameter names need restoring for this tester to run.

    def __init__( self , lowercase , ) -> Union[str, Any]:
        # Intended: store the parent test case plus the miniature model
        # hyper-parameters (batch 13, seq len 7, vocab 99, hidden 32, ...).
        __UpperCamelCase = parent
        __UpperCamelCase = 1_3
        __UpperCamelCase = 7
        __UpperCamelCase = True
        __UpperCamelCase = True
        __UpperCamelCase = False
        __UpperCamelCase = True
        __UpperCamelCase = 9_9
        __UpperCamelCase = 3_2
        __UpperCamelCase = 2
        __UpperCamelCase = 4
        __UpperCamelCase = 3_7
        __UpperCamelCase = """gelu"""
        __UpperCamelCase = 0.1
        __UpperCamelCase = 0.1
        __UpperCamelCase = 5_1_2
        __UpperCamelCase = 1_6
        __UpperCamelCase = 2
        __UpperCamelCase = 0.02
        __UpperCamelCase = 3
        __UpperCamelCase = 4
        __UpperCamelCase = None

    def __lowerCamelCase ( self ) -> List[str]:
        # Intended: prepare_config_and_inputs — random ids/masks/labels plus a
        # small DistilBertConfig.
        __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase = None
        if self.use_input_mask:
            __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __UpperCamelCase = None
        __UpperCamelCase = None
        __UpperCamelCase = None
        if self.use_labels:
            __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
        __UpperCamelCase = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
        # Intended: create_and_check_distilbert_model — checks last_hidden_state shape.
        __UpperCamelCase = TFDistilBertModel(config=lowercase )
        __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        __UpperCamelCase = model(lowercase )
        __UpperCamelCase = [input_ids, input_mask]
        __UpperCamelCase = model(lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
        # Intended: masked-LM head — logits shaped (batch, seq, vocab).
        __UpperCamelCase = TFDistilBertForMaskedLM(config=lowercase )
        __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        __UpperCamelCase = model(lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
        # Intended: QA head — start/end logits shaped (batch, seq).
        __UpperCamelCase = TFDistilBertForQuestionAnswering(config=lowercase )
        __UpperCamelCase = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
        }
        __UpperCamelCase = model(lowercase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
        # Intended: sequence-classification head — logits shaped (batch, num_labels).
        __UpperCamelCase = self.num_labels
        __UpperCamelCase = TFDistilBertForSequenceClassification(lowercase )
        __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        __UpperCamelCase = model(lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
        # Intended: multiple-choice head — inputs tiled per choice, logits
        # shaped (batch, num_choices).
        __UpperCamelCase = self.num_choices
        __UpperCamelCase = TFDistilBertForMultipleChoice(lowercase )
        __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
        __UpperCamelCase = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
        }
        __UpperCamelCase = model(lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
        # Intended: token-classification head — logits shaped (batch, seq, num_labels).
        __UpperCamelCase = self.num_labels
        __UpperCamelCase = TFDistilBertForTokenClassification(lowercase )
        __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        __UpperCamelCase = model(lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __lowerCamelCase ( self ) -> Dict:
        # Intended: prepare_config_and_inputs_for_common — drop the labels and
        # return (config, inputs_dict) for the shared test mixin.
        __UpperCamelCase = self.prepare_config_and_inputs()
        ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
        __UpperCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase):
    # Common-suite test specialization for TFDistilBert: registers the model
    # classes / pipeline mapping and delegates shape checks to the tester above.
    # NOTE(review): the class attributes are all bound to the same name
    # `__SCREAMING_SNAKE_CASE` (each assignment overwrites the previous), every
    # test method shares the name `__lowerCamelCase` (only the last def
    # survives), the base names `UpperCAmelCase_` are undefined (likely the
    # imported TFModelTesterMixin / PipelineTesterMixin — TODO confirm), and
    # method bodies read the undefined name `lowercase` — names need restoring.

    __SCREAMING_SNAKE_CASE = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    __SCREAMING_SNAKE_CASE = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = False

    def __lowerCamelCase ( self ) -> Optional[Any]:
        # Intended: setUp — build the model tester and a ConfigTester (dim=37).
        # NOTE(review): `lowercase` is undefined here; the file imports
        # DistilBertConfig, the likely intended config_class — TODO confirm.
        __UpperCamelCase = TFDistilBertModelTester(self )
        __UpperCamelCase = ConfigTester(self , config_class=lowercase , dim=3_7 )

    def __lowerCamelCase ( self ) -> Any:
        self.config_tester.run_common_tests()

    def __lowerCamelCase ( self ) -> Dict:
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowercase )

    def __lowerCamelCase ( self ) -> Union[str, Any]:
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase )

    def __lowerCamelCase ( self ) -> int:
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase )

    def __lowerCamelCase ( self ) -> Any:
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase )

    def __lowerCamelCase ( self ) -> Optional[Any]:
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase )

    def __lowerCamelCase ( self ) -> Union[str, Any]:
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase )

    @slow
    def __lowerCamelCase ( self ) -> Tuple:
        # Intended: smoke-test loading the first pretrained checkpoint.
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            __UpperCamelCase = TFDistilBertModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase):
    # Slow integration test: runs the real distilbert-base-uncased checkpoint
    # on a fixed 6-token input and compares a 3x3 slice of the hidden states
    # against reference values (atol=1e-4).
    # NOTE(review): each result is assigned to the local `__UpperCamelCase` but
    # read as `model` / `output`, and `lowercase` is undefined — the intended
    # local names need restoring for this test to run.

    @slow
    def __lowerCamelCase ( self ) -> Optional[int]:
        __UpperCamelCase = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        __UpperCamelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __UpperCamelCase = model(lowercase )[0]
        # Expected hidden-state shape: (batch=1, seq=6, hidden=768).
        __UpperCamelCase = [1, 6, 7_6_8]
        self.assertEqual(output.shape , lowercase )
        __UpperCamelCase = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
| 349 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = torch.device("cpu")
def lowerCamelCase ( ) -> Union[str, Any]:
    """Download and return the standard COCO test image (two cats) used to
    smoke-test vision checkpoints.

    Fixes: the URL and opened image were stored in a throwaway local while the
    undefined names ``UpperCAmelCase__``/``im`` were read — the request is now
    made with the actual URL (stream=True so PIL can read the raw response)
    and the opened image is returned.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> List[str]:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def lowerCamelCase ( dct , old_key , new_key ) -> None:
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place (the old key is
    removed).

    Fixes: the original declared three identically-named parameters (a
    SyntaxError) and read the undefined names ``dct``/``val`` — distinct
    parameter names restored.
    """
    val = dct.pop(old_key )
    dct[new_key] = val
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> Any:
lowercase_ : int = []
for k in state_dict.keys():
lowercase_ : Union[str, Any] = k
if ".pwconv" in k:
lowercase_ : Optional[Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
lowercase_ : Union[str, Any] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
lowercase_ : Any = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
lowercase_ : Union[str, Any] = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
lowercase_ : List[str] = k_new.split(""".""" )
if ls[2].isdigit():
lowercase_ : Tuple = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
lowercase_ : List[str] = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] ) -> Tuple:
    """Convert an original SwiftFormer checkpoint to the HuggingFace format
    and verify its logits against hard-coded reference values.

    NOTE(review): the three parameters share one auto-generated name (a
    SyntaxError) and the body reads ``swiftformer_name``, ``original_ckpt``
    and ``pytorch_dump_folder_path``, which are never bound here -- judging
    from the ``__main__`` call they should be (model name, dump folder,
    checkpoint path); confirm before running.
    """
    lowercase_ : Optional[Any] = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    lowercase_ : Any = 1000
    lowercase_ : List[str] = """huggingface/label-files"""
    lowercase_ : Union[str, Any] = """imagenet-1k-id2label.json"""
    # Fetch the ImageNet-1k id->label mapping from the Hub and attach it to the config.
    lowercase_ : Any = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
    lowercase_ : Optional[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
    lowercase_ : Optional[Any] = idalabel
    lowercase_ : List[Any] = {v: k for k, v in idalabel.items()}
    # size of the architecture: per-stage depths and embedding dims for each variant
    if swiftformer_name == "swiftformer_xs":
        lowercase_ : List[Any] = [3, 3, 6, 4]
        lowercase_ : Dict = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        lowercase_ : Tuple = [3, 3, 9, 6]
        lowercase_ : Optional[int] = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        lowercase_ : str = [4, 3, 10, 5]
        lowercase_ : Union[str, Any] = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        lowercase_ : Tuple = [4, 4, 12, 6]
        lowercase_ : List[Any] = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("""https""" ):
            lowercase_ : Optional[int] = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="""cpu""" , check_hash=UpperCAmelCase__ )
        else:
            lowercase_ : Optional[int] = torch.load(UpperCAmelCase__ , map_location="""cpu""" )
    lowercase_ : Any = checkpoint
    lowercase_ : Optional[Any] = create_rename_keys(UpperCAmelCase__ )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    # load HuggingFace model
    lowercase_ : Tuple = SwiftFormerForImageClassification(UpperCAmelCase__ ).eval()
    hf_model.load_state_dict(UpperCAmelCase__ )
    # prepare test inputs
    lowercase_ : List[str] = prepare_img()
    # NOTE(review): "preprocessor_config" looks like a local path, not a Hub id -- confirm.
    lowercase_ : List[str] = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
    lowercase_ : Tuple = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )
    # compare outputs from both models: first five logits must match the reference
    lowercase_ : List[Any] = get_expected_output(UpperCAmelCase__ )
    lowercase_ : Union[str, Any] = hf_model(inputs["""pixel_values"""] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , UpperCAmelCase__ , atol=1e-3 )
    Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
    print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
    # Fix: the ArgumentParser and the parsed args were assigned to throwaway
    # auto-generated names while the following statements referenced the
    # undefined `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 21 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
# Lazy-import structure for the HerBERT tokenizers (standard transformers
# package-__init__ pattern).
# Fixes: the structure dict was assigned to a dead name while `_LazyModule`
# received the undefined `_import_structure`; the fast tokenizer was never
# registered; and the lazy module was never installed into `sys.modules`.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The fast tokenizer is only exposed when `tokenizers` is installed.
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowerCAmelCase ( ):
    """Download the LAVIS merlion demo image and return it as an RGB PIL image.

    Fix: ``stream`` was passed the undefined module-level name ``lowercase_``;
    it must be ``True`` so that ``response.raw`` is a readable stream.
    """
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = dct.pop(lowercase_ )
UpperCAmelCase = val
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
UpperCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(lowercase_ , requires_grad=lowercase_ ), v_bias) )
UpperCAmelCase = qkv_bias
def _lowerCAmelCase ( model_name , eos_token_id=1 ):
    """Assemble a BLIP-2 config (vision + language) for the given model name.

    Fix: both parameters shared one auto-generated name (a SyntaxError) and
    the body read the undefined ``model_name``/``image_size``; the caller
    passes ``eos_token_id`` by keyword, so the second parameter is named
    accordingly with a default of 1.

    :returns: ``(config, image_size)`` — COCO-finetuned checkpoints use 364px inputs.
    """
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    # NOTE(review): an unrecognized model_name leaves text_config unbound
    # (NameError); callers restrict names via argparse choices.
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def _lowerCAmelCase ( lowercase_ , lowercase_=None , lowercase_=False ):
    """Convert an original LAVIS BLIP-2 checkpoint to the HuggingFace format,
    verify logits and a generated caption, then optionally save/push.

    NOTE(review): the three parameters share one auto-generated name (a
    SyntaxError) and the body reads ``model_name`` / ``pytorch_dump_folder_path``
    / ``push_to_hub``, which are never bound here -- judging from the
    ``__main__`` call those are the intended parameters; confirm before running.
    """
    # Pick the tokenizer matching the language backbone (OPT vs Flan-T5).
    UpperCAmelCase = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    UpperCAmelCase = tokenizer('\n' , add_special_tokens=lowercase_ ).input_ids[0]
    UpperCAmelCase , UpperCAmelCase = get_blipa_config(lowercase_ , eos_token_id=lowercase_ )
    UpperCAmelCase = BlipaForConditionalGeneration(lowercase_ ).eval()
    # Map HF model names onto LAVIS (architecture, checkpoint) identifiers.
    UpperCAmelCase = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    UpperCAmelCase , UpperCAmelCase = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = load_model_and_preprocess(
        name=lowercase_ , model_type=lowercase_ , is_eval=lowercase_ , device=lowercase_ )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    UpperCAmelCase = original_model.state_dict()
    UpperCAmelCase = create_rename_keys(lowercase_ )
    for src, dest in rename_keys:
        rename_key(lowercase_ , lowercase_ , lowercase_ )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        UpperCAmelCase = state_dict.pop(lowercase_ )
        if key.startswith('Qformer.bert' ):
            UpperCAmelCase = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            UpperCAmelCase = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            UpperCAmelCase = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            UpperCAmelCase = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            UpperCAmelCase = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            UpperCAmelCase = key.replace('t5' , 'language' )
        UpperCAmelCase = val
    # read in qv biases
    read_in_q_v_bias(lowercase_ , lowercase_ )
    # Only the qformer position_ids buffer is expected to be unexpected.
    UpperCAmelCase , UpperCAmelCase = hf_model.load_state_dict(lowercase_ , strict=lowercase_ )
    assert len(lowercase_ ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    UpperCAmelCase = load_demo_image()
    UpperCAmelCase = vis_processors['eval'](lowercase_ ).unsqueeze(0 ).to(lowercase_ )
    UpperCAmelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowercase_ )
    # create processor
    UpperCAmelCase = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=lowercase_ , image_std=lowercase_ )
    UpperCAmelCase = BlipaProcessor(image_processor=lowercase_ , tokenizer=lowercase_ )
    UpperCAmelCase = processor(images=lowercase_ , return_tensors='pt' ).pixel_values.to(lowercase_ )
    # make sure processor creates exact same pixel values
    assert torch.allclose(lowercase_ , lowercase_ )
    original_model.to(lowercase_ )
    hf_model.to(lowercase_ )
    with torch.no_grad():
        if "opt" in model_name:
            UpperCAmelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            UpperCAmelCase = hf_model(lowercase_ , lowercase_ ).logits
        else:
            UpperCAmelCase = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            UpperCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            UpperCAmelCase = hf_model(lowercase_ , lowercase_ , labels=lowercase_ ).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        UpperCAmelCase = torch.tensor(
            [[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=lowercase_ )
        assert torch.allclose(logits[0, :3, :3] , lowercase_ , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        UpperCAmelCase = torch.tensor(
            [[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=lowercase_ )
    else:
        # cast to same type
        UpperCAmelCase = logits.dtype
        assert torch.allclose(original_logits.to(lowercase_ ) , lowercase_ , atol=1e-2 )
    print('Looks ok!' )
    print('Generating a caption...' )
    UpperCAmelCase = ''
    UpperCAmelCase = tokenizer(lowercase_ , return_tensors='pt' ).input_ids.to(lowercase_ )
    UpperCAmelCase = original_model.generate({'image': original_pixel_values} )
    UpperCAmelCase = hf_model.generate(
        lowercase_ , lowercase_ , do_sample=lowercase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , lowercase_ )
    # Strip the prompt tokens before decoding the HF output.
    UpperCAmelCase = input_ids.shape[1]
    UpperCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase_ )
    UpperCAmelCase = [text.strip() for text in output_text]
    print('HF generation:' , lowercase_ )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(lowercase_ )
        hf_model.save_pretrained(lowercase_ )
    if push_to_hub:
        processor.push_to_hub(F"""nielsr/{model_name}""" )
        hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    # Fix: the parser, the choices list and the parsed args were each assigned
    # to the same throwaway name while later statements referenced the
    # undefined `parser` / `choices` / `args`.
    parser = argparse.ArgumentParser()
    choices = [
        """blip2-opt-2.7b""",
        """blip2-opt-6.7b""",
        """blip2-opt-2.7b-coco""",
        """blip2-opt-6.7b-coco""",
        """blip2-flan-t5-xl""",
        """blip2-flan-t5-xl-coco""",
        """blip2-flan-t5-xxl""",
    ]
    parser.add_argument(
        """--model_name""",
        default="""blip2-opt-2.7b""",
        choices=choices,
        type=str,
        help="""Path to hf config.json of model to convert""",
    )
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub after converting""",
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 78 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( __a , unittest.TestCase ):
    """CPU smoke tests for the ONNX Stable Diffusion img2img pipeline with
    each supported scheduler, checking a 3x3 corner slice of the output.

    NOTE(review): the base class ``__a`` is an undefined name here --
    presumably ``OnnxPipelineTesterMixin`` from the sibling import; confirm.
    """
    # Tiny test checkpoint on the Hub used by every test in this class.
    _A : List[Any] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def lowerCAmelCase ( self : int , __a : List[Any]=0 ) -> Optional[int]:
        """Build deterministic dummy pipeline inputs seeded with ``__a``."""
        __lowercase : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(__a ) )
        __lowercase : Any = np.random.RandomState(__a )
        __lowercase : Union[str, Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """strength""": 0.75,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
        """Default scheduler: output shape and corner slice match reference."""
        __lowercase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : Optional[Any] = self.get_dummy_inputs()
        __lowercase : str = pipe(**__a ).images
        __lowercase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        __lowercase : List[Any] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        # Wide tolerance: onnxruntime results are not bit-reproducible across setups.
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def lowerCAmelCase ( self : Any ) -> Any:
        """PNDM scheduler (with skip_prk_steps) against its reference slice."""
        __lowercase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        __lowercase : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__a )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : Dict = self.get_dummy_inputs()
        __lowercase : Optional[int] = pipe(**__a ).images
        __lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __lowercase : Any = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def lowerCAmelCase ( self : int ) -> str:
        """LMS discrete scheduler against its reference slice."""
        __lowercase : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        __lowercase : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__a )
        # warmup pass to apply optimizations
        __lowercase : Optional[int] = pipe(**self.get_dummy_inputs() )
        __lowercase : Dict = self.get_dummy_inputs()
        __lowercase : str = pipe(**__a ).images
        __lowercase : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __lowercase : Any = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        """Euler discrete scheduler against its reference slice."""
        __lowercase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        __lowercase : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : Any = self.get_dummy_inputs()
        __lowercase : int = pipe(**__a ).images
        __lowercase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __lowercase : Optional[int] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Euler-ancestral scheduler against its reference slice."""
        __lowercase : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        __lowercase : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : Tuple = self.get_dummy_inputs()
        __lowercase : Union[str, Any] = pipe(**__a ).images
        __lowercase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __lowercase : Union[str, Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
        """DPM-Solver multistep scheduler against its reference slice."""
        __lowercase : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        __lowercase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : str = self.get_dummy_inputs()
        __lowercase : Dict = pipe(**__a ).images
        __lowercase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        __lowercase : Dict = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX Stable Diffusion img2img
    pipeline against full-size checkpoints.

    Fix: the final assert line carried fused data-corruption tokens
    (``| 233 | 0 |``) that made the module a SyntaxError; they are removed.
    """
    @property
    def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
        """CUDA execution provider tuple with a capped arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def lowerCAmelCase ( self : str ) -> Tuple:
        """onnxruntime session options (memory pattern disabled)."""
        __lowercase : Union[str, Any] = ort.SessionOptions()
        __lowercase : Dict = False
        return options
    def lowerCAmelCase ( self : int ) -> Dict:
        """Full img2img run with the default (PNDM) scheduler on SD v1.4."""
        __lowercase : List[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        __lowercase : Union[str, Any] = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        __lowercase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : int = """A fantasy landscape, trending on artstation"""
        __lowercase : int = np.random.RandomState(0 )
        __lowercase : List[str] = pipe(
            prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__a , output_type="""np""" , )
        __lowercase : Any = output.images
        __lowercase : Optional[int] = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        __lowercase : Optional[int] = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Full img2img run with the LMS scheduler on SD v1.5."""
        __lowercase : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        __lowercase : Any = init_image.resize((768, 512) )
        __lowercase : int = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        __lowercase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=__a , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__a )
        __lowercase : Union[str, Any] = """A fantasy landscape, trending on artstation"""
        __lowercase : Any = np.random.RandomState(0 )
        __lowercase : Optional[Any] = pipe(
            prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__a , output_type="""np""" , )
        __lowercase : str = output.images
        __lowercase : Dict = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        __lowercase : Union[str, Any] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model and save it.

    Fix: the three parameters previously shared one auto-generated name
    (a SyntaxError) and the body referenced ``config_file`` /
    ``tf_checkpoint_path`` / ``pytorch_dump_path``, which were never bound.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param config_file: path to the T5 config JSON describing the architecture.
    :param pytorch_dump_path: output directory for the converted model.
    """
    # Initialise the PyTorch model from the JSON architecture description.
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser and parsed args were assigned to throwaway names while
    # later statements referenced the undefined `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 148 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger for the InstructBLIP configuration classes.
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# Map from pretrained model identifier to its hosted config.json.
UpperCAmelCase : Tuple = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class __lowerCAmelCase ( UpperCamelCase__):
    """Configuration for the InstructBLIP vision encoder (ViT-style tower).

    NOTE(review): the base class name ``UpperCamelCase__`` is undefined here --
    presumably ``PretrainedConfig``; the ``__init__`` parameters also all share
    one auto-generated name (a SyntaxError) while the body reads the intended
    names (``hidden_size`` etc.). Confirm against the upstream source.
    """
    # `model_type` identifier used by the AutoConfig machinery.
    _lowercase : Dict = """instructblip_vision_model"""
    def __init__( self , lowerCAmelCase__=1_4_0_8 , lowerCAmelCase__=6_1_4_4 , lowerCAmelCase__=3_9 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=1E-6 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Optional[Any]:
        """Store the vision-tower hyperparameters (hidden size, depth, heads,
        patch/image size, activation, norm/dropout epsilons, qkv bias)."""
        super().__init__(**lowerCAmelCase__ )
        a__ : Tuple =hidden_size
        a__ : Any =intermediate_size
        a__ : Union[str, Any] =num_hidden_layers
        a__ : Optional[Any] =num_attention_heads
        a__ : List[str] =patch_size
        a__ : int =image_size
        a__ : Tuple =initializer_range
        a__ : Any =attention_dropout
        a__ : List[Any] =layer_norm_eps
        a__ : Optional[Any] =hidden_act
        a__ : Optional[Any] =qkv_bias
    @classmethod
    def _lowercase ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
        """Load this config from a pretrained location, unwrapping the nested
        ``vision_config`` when the source is a composite InstructBLIP config."""
        cls._set_token_in_kwargs(lowerCAmelCase__ )
        a__ , a__ : Optional[Any] =cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            a__ : Any =config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __lowerCAmelCase ( UpperCamelCase__):
    """Configuration for the InstructBLIP Q-Former (BERT-style bridge between
    vision encoder and language model, with periodic cross-attention).

    NOTE(review): same mangling as the vision config -- undefined base class
    name and duplicated ``__init__`` parameter names (a SyntaxError) while the
    body reads the intended names; confirm against the upstream source.
    """
    # `model_type` identifier used by the AutoConfig machinery.
    _lowercase : Dict = """instructblip_qformer"""
    def __init__( self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=2 , lowerCAmelCase__=1_4_0_8 , **lowerCAmelCase__ , ) -> Dict:
        """Store the Q-Former hyperparameters, including how often a
        cross-attention layer is inserted and the encoder hidden size it
        attends over."""
        super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
        a__ : Optional[int] =vocab_size
        a__ : Optional[Any] =hidden_size
        a__ : str =num_hidden_layers
        a__ : Optional[int] =num_attention_heads
        a__ : Dict =hidden_act
        a__ : Optional[int] =intermediate_size
        a__ : Union[str, Any] =hidden_dropout_prob
        a__ : Optional[int] =attention_probs_dropout_prob
        a__ : List[Any] =max_position_embeddings
        a__ : Union[str, Any] =initializer_range
        a__ : Optional[int] =layer_norm_eps
        a__ : int =position_embedding_type
        a__ : int =cross_attention_frequency
        a__ : Tuple =encoder_hidden_size
    @classmethod
    def _lowercase ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ) -> "PretrainedConfig":
        """Load this config from a pretrained location, unwrapping the nested
        ``qformer_config`` when the source is a composite InstructBLIP config."""
        cls._set_token_in_kwargs(lowerCAmelCase__ )
        a__ , a__ : str =cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            a__ : Optional[int] =config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class __lowerCAmelCase ( UpperCamelCase__):
    """Composite InstructBLIP configuration bundling vision, Q-Former and
    language-model sub-configs.

    NOTE(review): heavily mangled -- the base class name is undefined, and in
    ``__init__`` all sub-configs are instantiated from the same ``**kwargs``
    instead of the respective ``vision_config``/``qformer_config``/``text_config``
    dicts; confirm against the upstream source before relying on it.
    """
    # `model_type` identifier used by the AutoConfig machinery.
    _lowercase : Dict = """instructblip"""
    # Composite configs are serialized with their sub-configs inlined.
    _lowercase : List[Any] = True
    def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=3_2 , **lowerCAmelCase__ ) -> str:
        """Build the composite config; missing sub-configs fall back to
        library defaults, and the text backbone defaults to OPT."""
        super().__init__(**lowerCAmelCase__ )
        if vision_config is None:
            a__ : List[Any] ={}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            a__ : Tuple ={}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            a__ : Dict ={}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        a__ : Dict =InstructBlipVisionConfig(**lowerCAmelCase__ )
        a__ : Union[str, Any] =InstructBlipQFormerConfig(**lowerCAmelCase__ )
        # The language backbone class is looked up dynamically from the config map.
        a__ : Tuple =text_config["model_type"] if "model_type" in text_config else "opt"
        a__ : List[str] =CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
        a__ : Union[str, Any] =self.text_config.tie_word_embeddings
        a__ : Optional[Any] =self.text_config.is_encoder_decoder
        a__ : str =num_query_tokens
        a__ : List[Any] =self.vision_config.hidden_size
        a__ : str =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        a__ : List[Any] =1.0
        a__ : List[str] =0.02
    @classmethod
    def _lowercase ( cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> int:
        """Alternate constructor from already-built sub-config objects."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
    def _lowercase ( self ) -> Union[str, Any]:
        """Serialize to a plain dict, expanding each sub-config."""
        a__ : int =copy.deepcopy(self.__dict__ )
        a__ : int =self.vision_config.to_dict()
        a__ : str =self.qformer_config.to_dict()
        a__ : str =self.text_config.to_dict()
        a__ : List[str] =self.__class__.model_type
        return output
| 148 | 1 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
# Poker-hand test fixtures.
# Fix vs. original: every constant was assigned to the same mangled name `__a`,
# so each assignment clobbered the previous one and the parametrized tests
# below had no data to reference. Restore distinct, descriptive names; the
# fixture data itself is unchanged.
SORTED_HANDS = (  # worst-to-best; used both for ordering tests and random comparisons
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)
TEST_COMPARE = (  # (hand, other, expected compare_with result)
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (  # (hand, expected _is_flush())
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (  # (hand, expected _is_straight())
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (  # (hand, expected _is_five_high_straight(), expected _card_values)
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (  # (hand, expected _is_same_kind())
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (  # (hand, expected _hand_type)
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected outcome.

    Returns:
        (hand, other, expected) where expected is "Loss"/"Tie"/"Win" for `hand`.
    """
    # Fix vs. original: both indices were unpacked into one mangled name while
    # the body read `play`/`oppo`, and the constant was an unresolved placeholder.
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # SORTED_HANDS is ordered worst-to-best, so index order decides the outcome.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """Lazily yield `number_of_hands` random (hand, other, expected) triples."""
    # Fix vs. original: function/parameter names were mangled placeholders and
    # the parameter was annotated `str` despite being an int count.
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand: str, expected: bool):
    """Flush detection matches the fixture's expected flag."""
    # Fix vs. original: parametrize source and arguments were mangled placeholders.
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand: str, expected: bool):
    """Straight detection matches the fixture's expected flag."""
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand: str, expected: bool, card_values: list):
    """Five-high (wheel) straight detection and the resulting card values."""
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand: str, expected):
    """Same-kind (pairs/trips/quads) detection matches the fixture."""
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand: str, expected: int):
    """Overall hand-type score matches the fixture."""
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand: str, other: str, expected: str):
    """compare_with produces the expected Win/Tie/Loss for fixed pairings."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand: str, other: str, expected: str):
    """compare_with agrees with SORTED_HANDS ordering for random pairings."""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    """Shuffling then sorting PokerHand objects restores the canonical order."""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    # chain() just wraps the sorted list in an iterator, as upstream did.
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """Reverse-sorting ranks the six-high straight above the wheel."""
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    # Fix vs. original: `reverse=` was given a mangled placeholder; the assertion
    # below requires descending order, i.e. reverse=True.
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """Repeated queries on one hand are idempotent (no internal state drift)."""
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """Project Euler problem 54: player 1 wins exactly 376 of the 1000 hands."""
    answer = 0
    # Fix vs. original: dirname() was given a mangled placeholder; the fixture
    # file lives next to this test module, so anchor on __file__.
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            # Each line is "<10 cards>": first 14 chars are player 1's hand.
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
  # Tokenization test-suite for XLNet (slow sentencepiece + fast tokenizers),
  # driven by the shared tester mixin (`SCREAMING_SNAKE_CASE__` — presumably
  # TokenizerTesterMixin; confirm against the import at the top of the file).
  #
  # NOTE(review): every test method below is named `A__`, so each later `def`
  # shadows the previous one and only the final method survives on the class.
  # The bare name `A__` referenced inside the bodies is likewise unresolved at
  # runtime, and several locals are assigned to `lowercase` but read under
  # other names (`tokenizer`, `vocab_keys`, ...). These look like
  # machine-mangled identifiers — restore the originals before relying on
  # this suite. Code left byte-identical here; comments only.
  lowercase_ : str =XLNetTokenizer
  lowercase_ : Dict =XLNetTokenizerFast
  lowercase_ : str =True
  lowercase_ : str =True
  def A__ ( self):
    # setUp: build a tokenizer from the SentencePiece fixture and persist it so
    # get_tokenizer() can reload it from self.tmpdirname.
    super().setUp()
    # We have a SentencePiece fixture for testing
    lowercase = XLNetTokenizer(A__ ,keep_accents=A__)
    # NOTE(review): `lowercase` is assigned but `tokenizer` is used below.
    tokenizer.sanitize_special_tokens()
    tokenizer.save_pretrained(self.tmpdirname)
  def A__ ( self):
    # Round-trip a single special token ("<s>" <-> id 1) through the vocab maps.
    lowercase = '''<s>'''
    lowercase = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__) ,A__)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__) ,A__)
  def A__ ( self):
    # Spot-check the first/second/last vocab entries and the vocab length.
    lowercase = list(self.get_tokenizer().get_vocab().keys())
    self.assertEqual(vocab_keys[0] ,'''<unk>''')
    self.assertEqual(vocab_keys[1] ,'''<s>''')
    self.assertEqual(vocab_keys[-1] ,'''<eod>''')
    self.assertEqual(len(A__) ,1_0_0_6)
  def A__ ( self):
    # vocab_size property of the fixture tokenizer.
    self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0)
  def A__ ( self):
    # Full round-trip with keep_accents: text -> tokens -> ids -> tokens.
    lowercase = XLNetTokenizer(A__ ,keep_accents=A__)
    lowercase = tokenizer.tokenize('''This is a test''')
    self.assertListEqual(A__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
    self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2])
    lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    self.assertListEqual(
    A__ ,[
        SPIECE_UNDERLINE + '''I''',
        SPIECE_UNDERLINE + '''was''',
        SPIECE_UNDERLINE + '''b''',
        '''or''',
        '''n''',
        SPIECE_UNDERLINE + '''in''',
        SPIECE_UNDERLINE + '''''',
        '''9''',
        '''2''',
        '''0''',
        '''0''',
        '''0''',
        ''',''',
        SPIECE_UNDERLINE + '''and''',
        SPIECE_UNDERLINE + '''this''',
        SPIECE_UNDERLINE + '''is''',
        SPIECE_UNDERLINE + '''f''',
        '''al''',
        '''s''',
        '''é''',
        '''.''',
    ] ,)
    lowercase = tokenizer.convert_tokens_to_ids(A__)
    self.assertListEqual(A__ ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4])
    lowercase = tokenizer.convert_ids_to_tokens(A__)
    # Decoding maps out-of-vocab pieces ("9", "é") to "<unk>".
    self.assertListEqual(
    A__ ,[
        SPIECE_UNDERLINE + '''I''',
        SPIECE_UNDERLINE + '''was''',
        SPIECE_UNDERLINE + '''b''',
        '''or''',
        '''n''',
        SPIECE_UNDERLINE + '''in''',
        SPIECE_UNDERLINE + '''''',
        '''<unk>''',
        '''2''',
        '''0''',
        '''0''',
        '''0''',
        ''',''',
        SPIECE_UNDERLINE + '''and''',
        SPIECE_UNDERLINE + '''this''',
        SPIECE_UNDERLINE + '''is''',
        SPIECE_UNDERLINE + '''f''',
        '''al''',
        '''s''',
        '''<unk>''',
        '''.''',
    ] ,)
  def A__ ( self):
    # Lower-casing mode: accents folded ("falsé" -> "false") and text lowercased.
    lowercase = XLNetTokenizer(A__ ,do_lower_case=A__)
    lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    self.assertListEqual(
    A__ ,[
        SPIECE_UNDERLINE + '''''',
        '''i''',
        SPIECE_UNDERLINE + '''was''',
        SPIECE_UNDERLINE + '''b''',
        '''or''',
        '''n''',
        SPIECE_UNDERLINE + '''in''',
        SPIECE_UNDERLINE + '''''',
        '''9''',
        '''2''',
        '''0''',
        '''0''',
        '''0''',
        ''',''',
        SPIECE_UNDERLINE + '''and''',
        SPIECE_UNDERLINE + '''this''',
        SPIECE_UNDERLINE + '''is''',
        SPIECE_UNDERLINE + '''f''',
        '''al''',
        '''se''',
        '''.''',
    ] ,)
    self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''▁he''', '''ll''', '''o'''])
  def A__ ( self):
    # Case-preserving mode with accent folding only.
    lowercase = XLNetTokenizer(A__ ,do_lower_case=A__)
    lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
    self.assertListEqual(
    A__ ,[
        SPIECE_UNDERLINE + '''I''',
        SPIECE_UNDERLINE + '''was''',
        SPIECE_UNDERLINE + '''b''',
        '''or''',
        '''n''',
        SPIECE_UNDERLINE + '''in''',
        SPIECE_UNDERLINE + '''''',
        '''9''',
        '''2''',
        '''0''',
        '''0''',
        '''0''',
        ''',''',
        SPIECE_UNDERLINE + '''and''',
        SPIECE_UNDERLINE + '''this''',
        SPIECE_UNDERLINE + '''is''',
        SPIECE_UNDERLINE + '''f''',
        '''al''',
        '''se''',
        '''.''',
    ] ,)
  @slow
  def A__ ( self):
    # XLNet appends [SEP]=4 and [CLS]=3 after each sequence (CLS last).
    lowercase = XLNetTokenizer.from_pretrained('''xlnet-base-cased''')
    lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=A__)
    lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=A__)
    lowercase = tokenizer.build_inputs_with_special_tokens(A__)
    lowercase = tokenizer.build_inputs_with_special_tokens(A__ ,A__)
    assert encoded_sentence == text + [4, 3]
    assert encoded_pair == text + [4] + text_a + [4, 3]
  @slow
  def A__ ( self):
    # Integration test against a pinned revision of xlnet-base-cased.
    # fmt: off
    lowercase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=A__ ,model_name='''xlnet-base-cased''' ,revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' ,)
| 101 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def UpperCamelCase__ ( main_process_only = True , *args , **kwargs ):
    """Wrapped `tqdm` progress bar that, by default, only renders on the local
    main process of a distributed run.

    Args:
        main_process_only: if True, disable the bar on every process whose
            ``local_process_index`` is not 0.
        *args, **kwargs: forwarded verbatim to ``tqdm.auto.tqdm``.

    Raises:
        ImportError: if tqdm is not installed.
    """
    # Fix vs. original: the signature reused one mangled name for all three
    # parameters (a SyntaxError, and the body already read `main_process_only`),
    # the computed flag was never passed to _tqdm, and the comparison was
    # inverted (`== 0` would have disabled the bar on the main process).
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    """Return True iff `number` contains each of the digits 1-9 exactly once.

    Fix vs. original: the function name was a mangled placeholder (the solver
    below calls `is_9_pandigital`), and `len()` was applied to the int itself
    rather than its decimal string.
    """
    digits = str(number)
    # Comparing against the full digit set rejects repeats and any 0.
    return len(digits) == 9 and set(digits) == set('123456789' )
def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital formed as a concatenated product.

    Fix vs. original: restore the name `solution` (the __main__ guard calls it)
    and the `candidate` local lost to identifier mangling.

    Returns:
        The largest pandigital candidate, or None if no candidate matches.
    """
    # 4-digit base with n=(1,2): concat(base, 2*base) == 100_002 * base.
    for base_num in range(9_999 , 4_999 , -1 ):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    # 3-digit base with n=(1,2,3): concat(base, 2*base, 3*base) == 1_002_003 * base.
    for base_num in range(333 , 99 , -1 ):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None


if __name__ == "__main__":
    print(F"""{solution() = }""")
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with bucket sort (one bucket per integer offset
    from the minimum value), returning a new sorted list.

    Fix vs. original: restore the name `bucket_sort` (the __main__ guard calls
    it), the `min_value`/`max_value`/`bucket_count` locals lost to identifier
    mangling, and the inner loop which appended the *whole input list* to a
    bucket and then sorted the input instead of each bucket.
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for value in my_list:
        buckets[int(value - min_value)].append(value)
    # Each bucket is sorted independently, then concatenated in bucket order.
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class snake_case__ (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    """Formatter that materializes Arrow tables as (nested) PyTorch tensors.

    Fix vs. original: ``__init__`` reused one mangled name for both parameters
    (a SyntaxError) and all seven methods were named ``__UpperCAmelCase``, so
    later defs shadowed earlier ones while the bodies still called
    ``self._consolidate`` / ``self._tensorize`` / ``self.recursive_tensorize``;
    the conventional names the bodies expect are restored here, along with the
    mangled ``torch.intaa``/``torch.floataa`` dtypes and ``type(None)`` check.
    """

    def __init__(self, features=None, **torch_tensor_kwargs) -> None:
        super().__init__(features=features)
        # Extra kwargs forwarded verbatim to every torch.tensor(...) call.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a non-empty list of same-shape/same-dtype tensors; otherwise return it unchanged."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a torch tensor (str/bytes/None pass through)."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        # Map numpy integer/floating inputs to torch's defaults (int64/float32).
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # Explicit torch_tensor_kwargs win over the inferred default dtype.
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize every leaf of a nested dict/list structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a single row of the table."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        """Extract, decode, tensorize and consolidate the table's first column."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a whole batch, consolidating per column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test for a non-negative int.

    Fix vs. original: the whole module's functions were all named `lowercase__`
    (each def shadowing the last) while their bodies cross-referenced the real
    names; restore `is_prime`, which nearly every other function here calls.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    # Only divisors up to sqrt(number) need checking.
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to n (n > 2).

    Fix vs. original: restore the mangled function/local names; the line that
    zeroes out composites had lost its `begin_list[j]` assignment target.
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to n (inclusive) using `is_prime`."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of `number` as a list of factors.

    0 and 1 are returned as themselves (single-element lists).
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of `number`."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number: int) -> bool:
    """Return True iff `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes summing to `number` (Goldbach; number even and > 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(numbera: int, numbera_: int) -> int:
    """Euclidean greatest common divisor of two non-negative ints.

    Fix vs. original: the two parameters and the `rest` local had collapsed
    into duplicate mangled names, so the Euclidean swap updated nothing.
    """
    assert (
        isinstance(numbera, int)
        and isinstance(numbera_, int)
        and (numbera >= 0)
        and (numbera_ >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numbera_ != 0:
        rest = numbera % numbera_
        numbera = numbera_
        numbera_ = rest
    # precondition
    assert isinstance(numbera, int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def kg_v(numbera: int, numbera_: int) -> int:
    """Least common multiple of two positive ints, built from prime factorizations."""
    assert (
        isinstance(numbera, int)
        and isinstance(numbera_, int)
        and (numbera >= 1)
        and (numbera_ >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numbera_ > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numbera_)
    elif numbera == 1 or numbera_ == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numbera_)
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                # shared factor: take the higher multiplicity.
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_a: int, p_number_b: int) -> list:
    """Return all primes strictly between two primes p_number_a < p_number_b."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_a
        and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of n (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def is_perfect_number(number: int) -> bool:
    """Return True iff `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce numerator/denominator by their GCD; returns the simplified pair."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n: int) -> int:
    """Iterative factorial of a non-negative int (factorial(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def fib(n: int) -> int:
    """Iterative Fibonacci with fib(0) == fib(1) == 1, fib(2) == 2, ...

    Fix vs. original: the `tmp`/`fib1`/`ans` locals had collapsed into mangled
    assignment targets, so the rolling update never happened.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_prev = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_prev
        fib_prev = tmp
    return ans
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for XLM-RoBERTa-XL style models.

    Stores the hyper-parameters used to instantiate the model; defaults match
    the facebook/xlm-roberta-xl checkpoint.
    """

    # `model_type` is required by the PretrainedConfig registry machinery.
    SCREAMING_SNAKE_CASE__: str = "xlm-roberta-xl"
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=25_0880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=1_0240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-0_5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # Restored: the obfuscated signature repeated the parameter name `__a`
        # (a SyntaxError) and assigned every value to a local instead of `self`.
        # Parameter names follow the assignment order of the original body.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __SCREAMING_SNAKE_CASE ( OnnxConfig ):
    """ONNX export configuration for XLM-RoBERTa-XL models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for ONNX export.

        The ONNX exporter looks this property up by the name ``inputs``;
        multiple-choice tasks carry an extra `choice` axis.
        """
        # Restored: the obfuscated version bound the axis dict to a throwaway
        # name, leaving `dynamic_axis` undefined below.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 310 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _snake_case ( role_name: str ):
    """Create (or reuse) a SageMaker execution IAM role named ``role_name``.

    Best effort: attaches a trust policy for sagemaker.amazonaws.com plus an
    inline permission policy; if the role already exists it is reused.
    """
    # Restored: the obfuscated version never bound `iam_client` or the policy
    # dicts, and dumped the role name instead of the policy documents.
    iam_client = botoa.client('iam')
    sagemaker_trust_policy = {
        'Version': '2012-10-17',
        'Statement': [
            {'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) )
        policy_document = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': [
                        'sagemaker:*',
                        'ecr:GetDownloadUrlForLayer',
                        'ecr:BatchGetImage',
                        'ecr:BatchCheckLayerAvailability',
                        'ecr:GetAuthorizationToken',
                        'cloudwatch:PutMetricData',
                        'cloudwatch:GetMetricData',
                        'cloudwatch:GetMetricStatistics',
                        'cloudwatch:ListMetrics',
                        'logs:CreateLogGroup',
                        'logs:CreateLogStream',
                        'logs:DescribeLogStreams',
                        'logs:PutLogEvents',
                        'logs:GetLogEvents',
                        's3:CreateBucket',
                        's3:ListBucket',
                        's3:GetBucketLocation',
                        's3:GetObject',
                        's3:PutObject',
                    ],
                    'Resource': '*',
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=F'{role_name}_policy_permission', PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(F'role {role_name} already exists. Using existing one' )
def _snake_case ( snake_case__: str ):
    """Return the ARN of the existing IAM role named ``snake_case__``."""
    # Restored: the client handle was assigned to a throwaway name, leaving
    # `iam_client` undefined on the next line.
    iam_client = botoa.client('iam')
    return iam_client.get_role(RoleName=snake_case__)["Role"]["Arn"]
def _snake_case ( ):
    """Interactively collect Amazon SageMaker launch settings.

    Walks the user through authorization, IAM role, Docker image, SageMaker
    inputs/metrics files, distributed mode, torch dynamo options, instance
    type and mixed precision, then returns a populated ``SageMakerConfig``.

    Restored from the obfuscated version, which assigned every answer to the
    same placeholder `A` while later code referenced the real names, and
    replaced literal arguments (`int`, `False`, TORCH_DYNAMO_MODES) with an
    undefined name. Environment-variable exports follow upstream accelerate.
    """
    credentials_configuration = _ask_options(
        'How do you want to authorize?', ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '], int, )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field('Enter your AWS Profile name: [default] ', default='default' )
        os.environ['AWS_PROFILE'] = aws_profile
    else:
        print(
            'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
            '`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
        aws_access_key_id = _ask_field('AWS Access Key ID: ' )
        os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id
        aws_secret_access_key = _ask_field('AWS Secret Access Key: ' )
        os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key
    aws_region = _ask_field('Enter your AWS Region: [us-east-1]', default='us-east-1' )
    os.environ['AWS_DEFAULT_REGION'] = aws_region
    role_management = _ask_options(
        'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?', ['Provide IAM Role name', 'Create new IAM role using credentials'], int, )
    if role_management == 0:
        iam_role_name = _ask_field('Enter your IAM role name: ' )
    else:
        iam_role_name = 'accelerate_sagemaker_execution_role'
        print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        'Do you want to use custom Docker image? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field('Enter your Docker image: ', lambda x: str(x).lower() )
    is_sagemaker_inputs_enabled = _ask_field(
        'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ', lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        'Do you want to enable SageMaker metrics? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ', lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        'What is the distributed mode?', ['No distributed training', 'Data parallelism'], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        'Do you wish to optimize your script with torch dynamo?[yes/NO]:', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    if use_dynamo:
        prefix = 'dynamo_'
        dynamo_config[prefix + 'backend'] = _ask_options(
            'Which dynamo backend would you like to use?', [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
        if use_custom_options:
            dynamo_config[prefix + 'mode'] = _ask_options(
                'Which mode do you want to use?', TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default='default', )
            dynamo_config[prefix + 'use_fullgraph'] = _ask_field(
                'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
            dynamo_config[prefix + 'use_dynamic'] = _ask_field(
                'Do you want to enable dynamic shape tracing? [yes/NO]: ', _convert_yes_no_to_bool, default=False, error_message='Please enter yes or no.', )
    eca_instance_query = 'Which EC2 instance type you want to use for your training?'
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x).lower(), default='ml.p3.2xlarge' )
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            'How many machines do you want use? [1]: ', int, default=1, )
    mixed_precision = _ask_options(
        'Do you wish to use FP16 or BF16 (mixed precision)?', ['no', 'fp16', 'bf16', 'fp8'], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, eca_instance_type=eca_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
image_uri=snake_case__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=snake_case__ , use_cpu=snake_case__ , dynamo_config=snake_case__ , eca_instance_type=snake_case__ , profile=snake_case__ , region=snake_case__ , iam_role_name=snake_case__ , mixed_precision=snake_case__ , num_machines=snake_case__ , sagemaker_inputs_file=snake_case__ , sagemaker_metrics_file=snake_case__ , ) | 74 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCAmelCase ( AbstractFileSystem ):
    """Read-only fsspec filesystem over the files of a Hugging Face Hub dataset repo.

    Restored from the obfuscated version: the base class, the `root_marker` /
    `protocol` class attributes, the instance attributes, and the fsspec API
    method names (`_get_dirs`, `_open`, `info`, `ls`) had all been collapsed
    into undefined placeholders.
    """

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        # Lazily built mapping path -> fsspec entry dict; see _get_dirs().
        self.dir_cache = None

    def _get_dirs(self):
        """Populate ``self.dir_cache`` from the repo siblings on first use."""
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                # Register every parent directory of the file as well.
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    } )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        """Open a file of the repository via its resolved Hub URL."""
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={'''trust_env''': True},
        ).open()

    def info(self, path, **kwargs):
        """Return the cache entry for ``path`` or raise FileNotFoundError."""
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        """List the direct children of ``path`` (names, or entry dicts if detail)."""
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
| 207 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds tiny MRA configs/inputs and checks model outputs for the unit tests.

    Restored from the obfuscated version: the class name (referenced by the
    test case's ``setUp``), the ``__init__`` parameter names (which had been
    collapsed into duplicate `A`s — a SyntaxError), all ``self.`` attribute
    assignments, and every undefined ``SCREAMING_SNAKE_CASE_`` reference.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config plus random ids/masks/labels for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def get_pipeline_config(self):
        """Variant config with a larger vocab, as used by the pipeline tests."""
        config = self.get_config()
        config.vocab_size = 3_00
        return config

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but in decoder mode with encoder states."""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin, unittest.TestCase ):
    """Standard ModelTesterMixin test case for MRA models.

    Restored from the obfuscated version: the undefined base class is
    ``ModelTesterMixin``, the framework-required attribute names, the
    ``test_*`` method names unittest discovers, and the undefined
    ``SCREAMING_SNAKE_CASE_`` references.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # NOTE(review): attribute names follow upstream transformers MRA tests —
    # four boolean switches then the empty generative-class tuple; confirm order.
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the model check under every position-embedding flavor.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="""MRA does not output attentions""" )
    def test_attention_outputs(self):
        return
@require_torch
class __lowercase ( unittest.TestCase ):
    """Slow integration tests comparing pretrained MRA outputs to reference slices.

    Restored from the obfuscated version, which referenced an undefined
    ``SCREAMING_SNAKE_CASE_`` for the inputs, expected shapes and slices.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(2_56).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 2_56, 7_68))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(2_56).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 5_02_65
        expected_shape = torch.Size((1, 2_56, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.88_19], [9.3869, -3.2693, 11.09_56], [11.85_24, -3.4938, 13.12_10]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(40_96).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 5_02_65
        expected_shape = torch.Size((1, 40_96, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 365 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module bootstrap for the MCTCT package: the import structure is declared
# up front and the real module is swapped in only when attributes are accessed.
# Restored: the obfuscated version bound the structure dict (and the modeling
# list, and the _LazyModule instance) to throwaway names, so `_import_structure`
# was undefined and the lazy module was never installed in sys.modules.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling objects as well.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with the lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
def __UpperCamelCase ( _lowerCAmelCase ) -> list:
    """Return every variant of the input string in which exactly one
    alphabetic character has been uppercased, in position order.

    Non-alphabetic characters are skipped, so digits/punctuation never
    produce a variant.
    """
    variants = []
    for index, character in enumerate(_lowerCAmelCase):
        if character.isalpha():
            variants.append(_lowerCAmelCase[:index] + character.upper() + _lowerCAmelCase[index + 1 :])
    return variants
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 116 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class SCREAMING_SNAKE_CASE__ ( BaseOutput ):
    """Output of the prior transformer.

    Restored: the obfuscated version inherited from itself (a NameError at
    class creation) instead of ``BaseOutput``, and the field name had been
    replaced by a double-underscore placeholder that Python name-mangles into
    an unusable attribute.
    """

    # NOTE(review): field name restored to the upstream diffusers name
    # `predicted_image_embedding` — confirm against the (truncated) forward().
    predicted_image_embedding: torch.FloatTensor
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@register_to_config
def __init__( self, lowerCamelCase__ = 32, lowerCamelCase__ = 64, lowerCamelCase__ = 20, lowerCamelCase__ = 768, lowerCamelCase__=77, lowerCamelCase__=4, lowerCamelCase__ = 0.0, lowerCamelCase__ = "silu", lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = "linear", lowerCamelCase__ = "prd", lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, ):
super().__init__()
A : int = num_attention_heads
A : Tuple = attention_head_dim
A : Optional[int] = num_attention_heads * attention_head_dim
A : List[str] = additional_embeddings
A : int = time_embed_dim or inner_dim
A : Tuple = embedding_proj_dim or embedding_dim
A : Dict = clip_embed_dim or embedding_dim
A : List[str] = Timesteps(lowerCamelCase__, lowerCamelCase__, 0 )
A : Any = TimestepEmbedding(lowerCamelCase__, lowerCamelCase__, out_dim=lowerCamelCase__, act_fn=lowerCamelCase__ )
A : List[Any] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
if embedding_proj_norm_type is None:
A : Union[str, Any] = None
elif embedding_proj_norm_type == "layer":
A : str = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
A : Optional[Any] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
if encoder_hid_proj_type is None:
A : Dict = None
elif encoder_hid_proj_type == "linear":
A : Union[str, Any] = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
A : List[str] = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCamelCase__ ) )
if added_emb_type == "prd":
A : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCamelCase__ ) )
elif added_emb_type is None:
A : int = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
A : int = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, dropout=lowerCamelCase__, activation_fn="""gelu""", attention_bias=lowerCamelCase__, )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
A : int = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
A : Tuple = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
A : Optional[Any] = nn.LayerNorm(lowerCamelCase__ )
A : str = nn.Linear(lowerCamelCase__, lowerCamelCase__ )
A : Union[str, Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
causal_attention_mask.triu_(1 )
A : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""", lowerCamelCase__, persistent=lowerCamelCase__ )
A : Dict = nn.Parameter(torch.zeros(1, lowerCamelCase__ ) )
A : Tuple = nn.Parameter(torch.zeros(1, lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _lowerCAmelCase ( self ):
A : Optional[int] = {}
def fn_recursive_add_processors(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if hasattr(lowerCamelCase__, """set_processor""" ):
A : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCamelCase__, lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
return processors
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : Optional[Any] = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__, lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if hasattr(lowerCamelCase__, """set_processor""" ):
if not isinstance(lowerCamelCase__, lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCamelCase__, lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
self.set_attn_processor(AttnProcessor() )
def _lowerCAmelCase ( self, hidden_states, timestep, proj_embedding, encoder_hidden_states = None, attention_mask = None, return_dict = True, ):
    """Run the prior transformer and predict the image embedding.

    Args:
        hidden_states: noisy latents, ``(batch, embedding_dim)`` or
            ``(batch, num_embeddings, embedding_dim)`` (2-D inputs are promoted
            to sequence length 1 below).
        timestep: diffusion step — python number, 0-d or 1-d tensor.
        proj_embedding: conditioning embedding, projected and prepended.
        encoder_hidden_states: optional extra conditioning tokens.
        attention_mask: optional ``(batch, seq)`` mask, 1 = attend.
        return_dict: when ``False``, return a plain 1-tuple.

    Returns:
        ``PriorTransformerOutput`` (or 1-tuple) with the predicted embedding.

    Raises:
        ValueError: if the model has an ``encoder_hidden_states_proj`` but no
            ``encoder_hidden_states`` was passed.
    """
    # BUG FIX: the original signature repeated the parameter name
    # `lowerCamelCase__` six times (a SyntaxError) and the body read names that
    # were never bound; coherent names restored from the statement sequence.
    batch_size = hidden_states.shape[0]
    timesteps = timestep
    if not torch.is_tensor(timesteps):
        timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
    elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
        timesteps = timesteps[None].to(hidden_states.device)
    # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
    timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
    timesteps_projected = self.time_proj(timesteps)
    # timesteps does not contain any weights and will always return f32 tensors
    # but time_embedding might be fp16, so we need to cast here.
    timesteps_projected = timesteps_projected.to(dtype=self.dtype)
    time_embeddings = self.time_embedding(timesteps_projected)
    if self.embedding_proj_norm is not None:
        proj_embedding = self.embedding_proj_norm(proj_embedding)
    proj_embeddings = self.embedding_proj(proj_embedding)
    if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
        encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
    elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
        raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
    hidden_states = self.proj_in(hidden_states)
    positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
    additional_embeds = []
    additional_embeddings_len = 0
    if encoder_hidden_states is not None:
        additional_embeds.append(encoder_hidden_states)
        additional_embeddings_len += encoder_hidden_states.shape[1]
    # Promote 2-D inputs to sequences of length 1.
    if len(proj_embeddings.shape) == 2:
        proj_embeddings = proj_embeddings[:, None, :]
    if len(hidden_states.shape) == 2:
        hidden_states = hidden_states[:, None, :]
    additional_embeds = additional_embeds + [
        proj_embeddings,
        time_embeddings[:, None, :],
        hidden_states,
    ]
    if self.prd_embedding is not None:
        prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
        additional_embeds.append(prd_embedding)
    hidden_states = torch.cat(
        additional_embeds, dim=1, )
    # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
    additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
    if positional_embeddings.shape[1] < hidden_states.shape[1]:
        positional_embeddings = F.pad(
            positional_embeddings, (
                0,
                0,
                additional_embeddings_len,
                self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
            ), value=0.0, )
    hidden_states = hidden_states + positional_embeddings
    if attention_mask is not None:
        # Convert the {0,1} mask to an additive bias, pad for the extra tokens,
        # add the causal mask, then expand to one row per attention head.
        attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
        attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
        attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
        attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
    if self.norm_in is not None:
        hidden_states = self.norm_in(hidden_states)
    for block in self.transformer_blocks:
        hidden_states = block(hidden_states, attention_mask=attention_mask)
    hidden_states = self.norm_out(hidden_states)
    if self.prd_embedding is not None:
        # With a PRD token, the prediction is read from the final token only.
        hidden_states = hidden_states[:, -1]
    else:
        hidden_states = hidden_states[:, additional_embeddings_len:]
    predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
    if not return_dict:
        return (predicted_image_embedding,)
    return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A : str = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 116 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCAmelCase__ ( A_ ):
    """Unit tests for the REALM retriever (tokenizer setup, block-record lookup, (de)serialization).

    NOTE(review): throughout this class, results are assigned to the throwaway
    name ``_snake_case`` and later read back under names that are never bound
    (``_lowerCamelCase``, ``config``, ``retriever`` …) — artifacts of an
    automated identifier-mangling pass. Compare with the upstream
    ``transformers`` REALM retrieval test before trusting runtime behavior.
    """

    def lowercase ( self : Optional[Any] ):
        # setUp: temp dir with a minimal WordPiece vocab and a block-records folder.
        _snake_case = tempfile.mkdtemp()
        _snake_case = 5
        # Realm tok
        _snake_case = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _snake_case = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
        _snake_case = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        _snake_case = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )

    def lowercase ( self : Optional[int] ):
        # Tokenizer factory backed by the vocab written in setUp.
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def lowercase ( self : Union[str, Any] ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def lowercase ( self : List[Any] ):
        # Minimal config with the test's number of block records.
        _snake_case = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowercase ( self : Union[str, Any] ):
        # Tiny QA dataset fixture (id / question / answers columns).
        _snake_case = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def lowercase ( self : Optional[int] ):
        # Fixture: numpy array of byte-string evidence blocks.
        _snake_case = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=_lowerCamelCase , )
        return block_records

    def lowercase ( self : List[Any] ):
        # Retriever wired to the dummy block records and tokenizer above.
        _snake_case = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowercase ( self : Optional[int] ):
        # Happy-path retrieval: output shapes and detokenized concatenated inputs.
        _snake_case = self.get_config()
        _snake_case = self.get_dummy_retriever()
        _snake_case = retriever.tokenizer
        _snake_case = np.array([0, 3] , dtype='''long''' )
        _snake_case = tokenizer(['''Test question'''] ).input_ids
        _snake_case = tokenizer(
            ['''the fourth'''] , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ).input_ids
        _snake_case = config.reader_seq_len
        _snake_case , _snake_case , _snake_case , _snake_case = retriever(
            _lowerCamelCase , _lowerCamelCase , answer_ids=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors='''np''' )
        self.assertEqual(len(_lowerCamelCase ) , 2 )
        self.assertEqual(len(_lowerCamelCase ) , 2 )
        self.assertEqual(len(_lowerCamelCase ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def lowercase ( self : int ):
        # Retrieval when answers span several / longer blocks: start-end bookkeeping.
        _snake_case = self.get_config()
        _snake_case = self.get_dummy_retriever()
        _snake_case = retriever.tokenizer
        _snake_case = np.array([0, 3, 5] , dtype='''long''' )
        _snake_case = tokenizer(['''Test question'''] ).input_ids
        _snake_case = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ).input_ids
        _snake_case = config.reader_seq_len
        _snake_case , _snake_case , _snake_case , _snake_case = retriever(
            _lowerCamelCase , _lowerCamelCase , answer_ids=_lowerCamelCase , max_length=_lowerCamelCase , return_tensors='''np''' )
        self.assertEqual([False, True, True] , _lowerCamelCase )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _lowerCamelCase )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _lowerCamelCase )

    def lowercase ( self : str ):
        # Round-trip save_pretrained / from_pretrained, plus a mocked hub download.
        _snake_case = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        _snake_case = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            _snake_case = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            _snake_case = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
            self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 40 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
    """Model tester for Mask2Former: builds small configs/random inputs and runs shape checks.

    NOTE(review): several signatures below repeat the parameter name
    ``_lowerCamelCase`` (a SyntaxError in CPython) and bodies read names that
    are never bound (``parent``, ``config``, ``model`` …) — artifacts of an
    automated identifier-mangling pass. Compare with the upstream
    ``transformers`` Mask2Former test before trusting runtime behavior.
    """

    def __init__( self : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any]=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Dict=3 , _lowerCamelCase : Optional[int]=32 * 8 , _lowerCamelCase : Optional[int]=32 * 8 , _lowerCamelCase : Dict=4 , _lowerCamelCase : Optional[int]=64 , ):
        # Store test hyper-parameters (batch size, image size, query/label counts, hidden dim).
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = is_training
        _snake_case = use_auxiliary_loss
        _snake_case = num_queries
        _snake_case = num_channels
        _snake_case = min_size
        _snake_case = max_size
        _snake_case = num_labels
        _snake_case = hidden_dim
        _snake_case = hidden_dim

    def lowercase ( self : List[str] ):
        # Random pixel values / mask / labels on the target device, plus a config.
        _snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _lowerCamelCase )
        _snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
        _snake_case = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
        ).float()
        _snake_case = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
        _snake_case = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def lowercase ( self : Optional[Any] ):
        # Small Mask2Former config sized for fast tests.
        _snake_case = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        _snake_case = self.num_queries
        _snake_case = self.num_labels
        _snake_case = [1, 1, 1, 1]
        _snake_case = self.num_channels
        _snake_case = 64
        _snake_case = 128
        _snake_case = self.hidden_dim
        _snake_case = self.hidden_dim
        _snake_case = self.hidden_dim
        return config

    def lowercase ( self : Any ):
        # Config + inputs dict, the format expected by the common test mixin.
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs()
        _snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def lowercase ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : int ):
        # Check the number of hidden-state levels from encoder / pixel decoder / transformer decoder.
        _snake_case = output.encoder_hidden_states
        _snake_case = output.pixel_decoder_hidden_states
        _snake_case = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_layers )

    def lowercase ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=False ):
        # Forward the base model; verify decoder output shape and hidden-state presence.
        with torch.no_grad():
            _snake_case = MaskaFormerModel(config=_lowerCamelCase )
            model.to(_lowerCamelCase )
            model.eval()
            _snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
            _snake_case = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )

    def lowercase ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] ):
        # Forward the segmentation head with and without labels; verify logits shapes and loss.
        _snake_case = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.eval()

        def comm_check_on_output(_lowerCamelCase : List[str] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            _snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
            _snake_case = model(_lowerCamelCase )
            comm_check_on_output(_lowerCamelCase )
            _snake_case = model(
                pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
            comm_check_on_output(_lowerCamelCase )
            self.parent.assertTrue(result.loss is not None )
            self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
    """Common-API tests for Mask2Former (base model + universal-segmentation head).

    NOTE(review): the six class attributes below are all assigned to the same
    mangled name ``__a``, and method bodies read names never bound
    (``_snake_case`` results are read back as ``model``, ``outputs`` …,
    ``MaskaFormerModelTester`` does not exist under that name here) —
    artifacts of an automated rename pass; compare with the upstream
    ``transformers`` Mask2Former test.
    """

    __a = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    __a = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    __a = False
    __a = False
    __a = False
    __a = False

    def lowercase ( self : int ):
        # setUp: build the model tester and the config tester.
        _snake_case = MaskaFormerModelTester(self )
        _snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )

    def lowercase ( self : Dict ):
        # Exercise the generic config round-trip checks.
        self.config_tester.run_common_tests()

    def lowercase ( self : Tuple ):
        # Base model forward including hidden-state outputs.
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )

    def lowercase ( self : Any ):
        # Segmentation-head forward (with and without labels).
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )

    @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
    def lowercase ( self : Optional[int] ):
        pass

    @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
    def lowercase ( self : Dict ):
        pass

    @unittest.skip(reason='''Mask2Former is not a generative model''' )
    def lowercase ( self : int ):
        pass

    @unittest.skip(reason='''Mask2Former does not use token embeddings''' )
    def lowercase ( self : List[str] ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def lowercase ( self : Optional[int] ):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowercase ( self : str ):
        pass

    def lowercase ( self : Optional[int] ):
        # The forward signature must start with `pixel_values`.
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(_lowerCamelCase )
            _snake_case = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case = [*signature.parameters.keys()]
            _snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _lowerCamelCase )

    @slow
    def lowercase ( self : Optional[int] ):
        # The released checkpoint loads from the hub.
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            _snake_case = MaskaFormerModel.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )

    def lowercase ( self : List[Any] ):
        # Loss is produced when mask/class labels are supplied.
        _snake_case = (self.model_tester.min_size,) * 2
        _snake_case = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ),
            '''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
        }
        _snake_case = self.model_tester.get_config()
        _snake_case = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
        _snake_case = model(**_lowerCamelCase )
        self.assertTrue(outputs.loss is not None )

    def lowercase ( self : Union[str, Any] ):
        # Hidden-state outputs, via the shared model-tester check.
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )

    def lowercase ( self : str ):
        # Attention maps are returned when requested.
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
            _snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
            self.assertTrue(outputs.attentions is not None )

    def lowercase ( self : str ):
        # Training smoke test: loss backward on the segmentation head.
        if not self.model_tester.is_training:
            return
        _snake_case = self.all_model_classes[1]
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
        _snake_case = model_class(_lowerCamelCase )
        model.to(_lowerCamelCase )
        model.train()
        _snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
        loss.backward()

    def lowercase ( self : Optional[int] ):
        # Gradients reach all intermediate activations (retain_grad checks).
        _snake_case = self.all_model_classes[1]
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
        _snake_case = True
        _snake_case = True
        _snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
        model.train()
        _snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
        _snake_case = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _snake_case = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        _snake_case = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _snake_case = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_lowerCamelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
UpperCAmelCase__ = 1e-4  # NOTE(review): presumably the `atol` tolerance for the allclose checks in the slow tests; mangled names obscure where it is read
def _UpperCAmelCase ( ) -> Tuple:
    """Load the standard COCO sample image used by the slow integration tests."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration tests against the released Mask2Former Swin-S COCO checkpoint.

    NOTE(review): results assigned to ``_snake_case`` are read back under their
    original names (``model``, ``inputs``, ``outputs`` …) and device/tolerance
    arguments appear as the unbound ``_lowerCamelCase`` — artifacts of an
    automated rename pass; compare with the upstream ``transformers`` test.
    """

    @cached_property
    def lowercase ( self : Optional[Any] ):
        # Hub id of the reference checkpoint.
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def lowercase ( self : int ):
        # Image processor matching the checkpoint (None when vision deps are missing).
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def lowercase ( self : Any ):
        # Base model: compare 3x3 slices of each decoder output against reference values.
        _snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
        _snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
        with torch.no_grad():
            _snake_case = model(**_lowerCamelCase )
        _snake_case = torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        _snake_case = torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        _snake_case = torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_lowerCamelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )

    def lowercase ( self : str ):
        # Segmentation head: reference mask-queries and class-queries logits.
        _snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
        _snake_case = self.default_image_processor
        _snake_case = prepare_img()
        _snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
        _snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
        with torch.no_grad():
            _snake_case = model(**_lowerCamelCase )
        # masks_queries_logits
        _snake_case = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        _snake_case = [
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        _snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
        # class_queries_logits
        _snake_case = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        _snake_case = torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ] ).to(_lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )

    def lowercase ( self : Optional[int] ):
        # Batched inputs with segmentation maps produce a loss.
        _snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
        _snake_case = self.default_image_processor
        _snake_case = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
        _snake_case = inputs['''pixel_values'''].to(_lowerCamelCase )
        _snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
        _snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            _snake_case = model(**_lowerCamelCase )
        self.assertTrue(outputs.loss is not None )
| 40 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase__ ( A__ ) -> str:
    """Delete fairseq-specific bookkeeping keys from state dict ``A__`` in place.

    Missing keys are ignored. Returns nothing (mutates the dict).
    """
    # BUG FIX: the original looped over the list but popped undefined names
    # (`state_dict`, `a__`) instead of calling ``A__.pop(k, None)``.
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        A__.pop(k, None)
def UpperCamelCase__ ( A__ ) -> Dict:
snake_case__ , snake_case__ : Optional[Any] = emb.weight.shape
snake_case__ : Any = nn.Linear(a__ , a__ , bias=a__ )
snake_case__ : int = emb.weight.data
return lin_layer
def UpperCamelCase__ ( A__ ) -> List[Any]:
    """Convert the fairseq M2M-100 checkpoint at path ``A__`` into a
    ``MaMaaaForConditionalGeneration`` model and return it.

    Propagates whatever ``torch.load`` raises for a missing/corrupt file.
    """
    # BUG FIX: the original body read undefined names (`a__`, `mam_aaa`, `args`,
    # `state_dict`, `model`) and called helpers by names that do not exist in
    # this file; the helper logic is inlined so this function is self-contained.
    mam_aaa = torch.load(A__, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    # Drop fairseq-only bookkeeping entries (missing keys are ignored).
    for k in (
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ):
        state_dict.pop(k, None)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    # Encoder and decoder embeddings are shared in the HF model.
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: the checkpoint lacks the HF-only tied/derived parameters.
    model.model.load_state_dict(state_dict, strict=False)
    # Tie the LM head to the shared embedding (bias-free linear over the vocab).
    vocab_size, emb_size = model.model.shared.weight.shape
    lm_head = nn.Linear(vocab_size, emb_size, bias=False)
    lm_head.weight.data = model.model.shared.weight.data
    model.lm_head = lm_head
    return model
if __name__ == "__main__":
    # CLI entry point: convert a fairseq M2M-100 checkpoint into HF format and save it.
    # BUG FIX: the original assigned three different objects to the same mangled
    # name, read undefined `parser`/`args`, called the converter under a name
    # that does not exist in this file, and had a stray 'ß' character appended
    # to `args.fairseq_path`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    # `UpperCamelCase__` resolves to the converter (the last function so named above).
    model = UpperCamelCase__(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 143 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase_ ( unittest.TestCase ):
    """Settings holder for the YOLOS image-processor tests; computes expected resize shapes.

    NOTE(review): ``__init__`` and ``get_expected_values`` repeat the parameter
    name ``__UpperCamelCase`` (a SyntaxError in CPython) and the bodies read
    names that are never bound (``size``, ``parent``, ``image_inputs`` …) —
    artifacts of an automated rename pass; compare with the upstream
    ``transformers`` YOLOS image-processing test.
    """

    def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=3_0 , __UpperCamelCase=4_0_0 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , __UpperCamelCase=1 / 2_5_5 , __UpperCamelCase=True , ):
        """Store processing settings; `size` defaults to a shortest/longest-edge dict."""
        UpperCamelCase_ = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3}
        UpperCamelCase_ = parent
        UpperCamelCase_ = batch_size
        UpperCamelCase_ = num_channels
        UpperCamelCase_ = min_resolution
        UpperCamelCase_ = max_resolution
        UpperCamelCase_ = do_resize
        UpperCamelCase_ = size
        UpperCamelCase_ = do_normalize
        UpperCamelCase_ = image_mean
        UpperCamelCase_ = image_std
        UpperCamelCase_ = do_rescale
        UpperCamelCase_ = rescale_factor
        UpperCamelCase_ = do_pad

    def lowerCamelCase_ ( self ):
        """Return the stored settings as the kwargs dict for the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=False ):
        """Compute the (height, width) the processor should resize inputs to.

        Mirrors shortest-edge/longest-edge resizing; for batched inputs the
        result is the per-batch maxima (images are padded to the largest).
        """
        if not batched:
            UpperCamelCase_ = image_inputs[0]
            if isinstance(__UpperCamelCase , Image.Image ):
                UpperCamelCase_ , UpperCamelCase_ = image.size
            else:
                UpperCamelCase_ , UpperCamelCase_ = image.shape[1], image.shape[2]
            # Scale so the short side hits `shortest_edge`, preserving aspect ratio.
            if w < h:
                UpperCamelCase_ = int(self.size["""shortest_edge"""] * h / w )
                UpperCamelCase_ = self.size["""shortest_edge"""]
            elif w > h:
                UpperCamelCase_ = self.size["""shortest_edge"""]
                UpperCamelCase_ = int(self.size["""shortest_edge"""] * w / h )
            else:
                UpperCamelCase_ = self.size["""shortest_edge"""]
                UpperCamelCase_ = self.size["""shortest_edge"""]
        else:
            UpperCamelCase_ = []
            for image in image_inputs:
                UpperCamelCase_ , UpperCamelCase_ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Batched output is padded to the max height/width across the batch.
            UpperCamelCase_ = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0]
            UpperCamelCase_ = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : str = YolosImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
    """setUp: build the shared settings tester for this test class.

    NOTE(review): ``YolosImageProcessingTester`` is not defined under that name
    in this file (the tester class above was renamed by a mangling pass) —
    confirm against the upstream test.
    """
    UpperCamelCase_ = YolosImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ):
    """Kwargs dict used to construct the image processor under test."""
    return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
    """The processor exposes the expected configuration attributes."""
    # NOTE(review): `__UpperCamelCase` below is unbound — mangled reference;
    # upstream asserts on the constructed processor instance.
    UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
    self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
    self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
    self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
    self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def lowerCamelCase_ ( self ):
    """`from_dict` honors default size and `size`/`max_size`/`pad_and_return_pixel_mask` overrides."""
    # NOTE(review): `__UpperCamelCase` and `image_processor` references below are
    # unbound / mangled; confirm against the upstream test.
    UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} )
    self.assertEqual(image_processor.do_pad , __UpperCamelCase )
    UpperCamelCase_ = self.image_processing_class.from_dict(
        self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__UpperCamelCase )
    self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2, """longest_edge""": 8_4} )
    self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def lowerCamelCase_ ( self ):
    """Deliberate no-op (presumably overrides an inherited mixin test)."""
    pass
def lowerCamelCase_ ( self ):
    """PIL inputs: single and batched calls yield the expected resized shapes.

    NOTE(review): results assigned to ``UpperCamelCase_`` are read back under
    other names and ``__UpperCamelCase`` is unbound — mangling artifacts.
    """
    UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
    for image in image_inputs:
        self.assertIsInstance(__UpperCamelCase , Image.Image )
    # Test not batched input
    UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
    UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
    UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase_ = self.image_processing_class(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase , do_rescale=__UpperCamelCase )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
UpperCamelCase_ = image_processing_a.pad(__UpperCamelCase , return_tensors="""pt""" )
UpperCamelCase_ = image_processing_a(__UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"""image_id""": 3_9_7_6_9, """annotations""": target}
# encode them
UpperCamelCase_ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
UpperCamelCase_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
UpperCamelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
UpperCamelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target}
UpperCamelCase_ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
UpperCamelCase_ = YolosImageProcessor(format="""coco_panoptic""" )
UpperCamelCase_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
UpperCamelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
UpperCamelCase_ = 8_2_2_8_7_3
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
UpperCamelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
UpperCamelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
# --- (dataset concatenation residue removed; was: "| 122 | 0 |")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the Perceiver model package.
# BUG FIX: the obfuscated original bound this dict (and each optional group)
# to one throw-away name, while the `_LazyModule` call below references
# `_import_structure` — a NameError at import time. The vision entries also
# overwrote each other instead of extending the structure, and the lazy-module
# result was never installed into `sys.modules`.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras are only registered when the image deps are installed.
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- (dataset concatenation residue removed; was: "| 247 |")
import random


def _partition(data, pivot):
    """Three-way partition of *data* around *pivot*.

    Returns (less, equal, greater) lists.
    NOTE(review): function names reconstructed from the internal call sites
    (`_partition` / `quick_select`); the obfuscated source gave both functions
    the same generic name, leaving these calls unresolved.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the element that would sit at position *index* if *items* were sorted.

    Randomized quickselect; returns None for an out-of-range index.
    """
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))


# Backward-compat alias: the duplicated obfuscated name previously resolved
# to this selector (the second definition shadowed the first).
lowerCAmelCase_ = quick_select
# --- (dataset concatenation residue removed; was: "| 247 | 1 |")
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    # sentencepiece missing: there is no slow tokenizer to fall back to.
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

# BUG FIX: the obfuscated original rebound one generic name for every
# constant below, so the class that references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# `logger` would fail with NameError. Names restored from the upstream
# ALBERT tokenizer module.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# Sentencepiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class A__(PreTrainedTokenizerFast):  # NOTE(review): upstream name is AlbertTokenizerFast
    """Fast ALBERT tokenizer, backed by HuggingFace *tokenizers*.

    NOTE(review): the obfuscated original inherited from an undefined name,
    repeated one parameter name for every ``__init__`` argument (a
    SyntaxError) and shadowed one method name three times; signatures and
    method names were reconstructed from the upstream AlbertTokenizerFast —
    confirm against upstream.
    """

    # Class attributes consumed by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocab is only possible when a sentencepiece file exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into *save_directory*; return the path tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
from PIL import Image


def change_contrast(img, level):
    """Return a copy of *img* with its contrast adjusted by *level*.

    BUG FIX: the obfuscated original declared both parameters with the same
    name (a SyntaxError) and passed the wrong name to ``Image.point``; the
    public name is restored from the ``__main__`` caller below, with the old
    obfuscated name kept as an alias.
    """
    # Standard contrast-correction factor for an 8-bit channel.
    factor = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))

    def contrast(c):
        """Map a single channel value through the contrast curve."""
        return int(1_2_8 + factor * (c - 1_2_8))

    # Image.point applies `contrast` to every pixel/band value.
    return img.point(contrast)


# Backward-compat alias for the previous (obfuscated) public name.
a_ = change_contrast
if __name__ == "__main__":
    # Demo: load a sample image, raise its contrast, and save the result.
    # NOTE(review): `change_contrast` and `cont_img` are not defined under
    # those names in the obfuscated code above — confirm the intended
    # function/variable names before running this script.
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        lowerCAmelCase__ : Any = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
# --- (dataset concatenation residue removed; was: "| 98 | 0 |")
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Elementwise ReLU: max(0, x) for every entry of *vector*.

    BUG FIX: the obfuscated original's body referenced a name different from
    its only parameter (NameError), and the demo below called `relu`, which
    was never defined. Accepts any array-like; returns a numpy array.
    """
    return np.maximum(0, vector)


# Backward-compat alias for the previous (obfuscated) public name.
lowerCAmelCase__ = relu

if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
# --- (dataset concatenation residue removed; was: "| 359 |")
import os
import string
import sys
# Flag OR-ed into arrow-key codes so they don't collide with regular chars.
ARROW_KEY_FLAG = 1 << 8

# Symbolic key names -> (possibly flagged) key codes returned by get_character().
# BUG FIX: the obfuscated original rebound one generic name for every constant,
# leaving KEYMAP / ARROW_KEY_FLAG / WIN_CH_BUFFER / WIN_KEYMAP undefined for the
# functions below, and dropped the "arrow_begin"/"arrow_end" and digit entries
# that get_character() reads.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow codes form a contiguous range; get_character() checks against it.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Pending translated keystrokes (Windows reports special keys as two bytes).
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read one raw keystroke from stdin and return it as a 1-character string.

    Windows: reads via msvcrt, translating two-byte special-key sequences
    through WIN_KEYMAP and buffering follow-up characters in WIN_CH_BUFFER.
    POSIX: temporarily switches the terminal to raw mode for a single read.

    NOTE(review): the function's own name and all local names were
    reconstructed from the call sites below and the upstream `accelerate`
    keymap helper; the obfuscated original rebound one name for every local —
    confirm against upstream.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal settings, even on error.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Read one logical key press.

    Returns the printable character, an arrow-key code offset by
    ARROW_KEY_FLAG, or KEYMAP["undefined"] for anything unrecognised.

    BUG FIX: the obfuscated original shared its name with get_raw_chars
    (shadowing it) and called `get_raw_chars` which was therefore undefined;
    name restored per the upstream `accelerate` keymap helper.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            # Arrow keys arrive as ESC [ <letter>; map back into flagged codes.
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy import structure for the GPT-NeoX-Japanese package.
# BUG FIX: the obfuscated original bound this dict and the torch entry to
# two different throw-away names, while the `_LazyModule` call below
# references `_import_structure` — a NameError at import time; the lazy
# module was also never installed into `sys.modules`.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- (dataset concatenation residue removed; was: "| 88 |")
from __future__ import annotations
from PIL import Image
# Define glider example (used by the __main__ demo below).
# BUG FIX: the obfuscated original bound both example boards to one generic
# name, so `GLIDER` referenced in __main__ was undefined; names restored
# from the demo caller (BLINKER per the upstream script — confirm).
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute one step of Conway's Game of Life on a rectangular grid.

    BUG FIX: the obfuscated original assigned every local to one generic name
    while the body read `next_generation_row`, `neighbour_count` and `alive`
    (NameError); the function name is restored from its call site in
    generate_images().
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render *frames* successive generations of *cells* as PIL images.

    BUG FIX: the obfuscated original discarded the pixel writes and called
    `new_generation` while no function of that name was defined; locals and
    the function name were restored from the __main__ caller and the
    upstream script — confirm against upstream.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image (live cell = black, dead = white)
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Render 16 generations of the glider and save them as an animated GIF.
    # BUG FIX: the obfuscated original bound the result to a generic name and
    # then read the undefined `images`.
    images = generate_images(GLIDER, 16)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
# --- (dataset concatenation residue removed; was: "| 12 | 0 |")
def snake_case (__lowercase ) -> int:
'''simple docstring'''
_snake_case : str = []
_snake_case : Optional[int] = []
_snake_case : List[str] = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
_snake_case : Optional[Any] = len(__lowercase ) if (len(__lowercase ) > 7) else 7
# Print table header for output
print(
"Symbol".center(8 ) , "Stack".center(__lowercase ) , "Postfix".center(__lowercase ) , sep=" | " , )
print("-" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__lowercase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__lowercase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__lowercase ) == 0:
stack.append(__lowercase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__lowercase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__lowercase ) # push x to stack
print(
x.center(8 ) , ("".join(__lowercase )).ljust(__lowercase ) , ("".join(__lowercase )).ljust(__lowercase ) , sep=" | " , ) # Output in tabular format
while len(__lowercase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
" ".center(8 ) , ("".join(__lowercase )).ljust(__lowercase ) , ("".join(__lowercase )).ljust(__lowercase ) , sep=" | " , ) # Output in tabular format
return "".join(__lowercase ) # return Postfix as str
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
_snake_case : Any = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__lowercase ) ):
if infix[i] == "(":
_snake_case : int = ")" # change "(" to ")"
elif infix[i] == ")":
_snake_case : str = "(" # change ")" to "("
return (infix_2_postfix("".join(__lowercase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    # BUG FIX: the obfuscated original bound the input to a generic name and
    # then read the undefined `Infix`.
    Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
    Infix = ''.join(Infix.split())  # Remove spaces from the input
    print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')

# NOTE(review): this import belongs to the following concatenated snippet; it
# was fused onto the print line above (a SyntaxError) and is kept at module
# level so later code using `logging` still resolves.
import logging
from transformers import PretrainedConfig
# NOTE(review): names reconstructed — the obfuscated source rebound one
# generic name for both the logger and the archive map; confirm the intended
# names against the upstream bertabs example.
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class lowercase_(PretrainedConfig):  # NOTE(review): upstream name is BertAbsConfig
    """Configuration for the BertAbs abstractive-summarization example model.

    NOTE(review): the obfuscated original inherited from an undefined name,
    repeated one parameter name for every ``__init__`` argument (a
    SyntaxError), stored values in a local instead of on ``self``, and had
    dataset residue fused onto its last line; parameter names were
    reconstructed from the upstream bertabs example — confirm against
    upstream.
    """

    # Identifier used by the PretrainedConfig machinery.
    model_type = 'bertabs'

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# The conversion relies on gluonnlp/mxnet internals that changed between
# releases, so exact versions are enforced up front.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')

if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')

logging.set_verbosity_info()
# NOTE(review): the two constants below were both bound to the same
# obfuscated name (the second shadows the first); presumably `logger` and
# `SAMPLE_TEXT` upstream — confirm before relying on either.
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : str ='''The Nymphenburg Palace is a beautiful palace in Munich!'''
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : str ):
lowercase_ :int = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
lowercase_ :List[str] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowercase_ :Optional[Any] = BERTEncoder(
attention_cell=predefined_args["attention_cell"] ,num_layers=predefined_args["num_layers"] ,units=predefined_args["units"] ,hidden_size=predefined_args["hidden_size"] ,max_length=predefined_args["max_length"] ,num_heads=predefined_args["num_heads"] ,scaled=predefined_args["scaled"] ,dropout=predefined_args["dropout"] ,output_attention=snake_case_ ,output_all_encodings=snake_case_ ,use_residual=predefined_args["use_residual"] ,activation=predefined_args.get("activation" ,"gelu" ) ,layer_norm_eps=predefined_args.get("layer_norm_eps" ,snake_case_ ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowercase_ :List[Any] = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
lowercase_ :Any = os.path.join(get_home_dir() ,"models" )
lowercase_ :Any = _load_vocab(snake_case_ ,snake_case_ ,snake_case_ ,cls=snake_case_ )
lowercase_ :Dict = nlp.model.BERTModel(
snake_case_ ,len(snake_case_ ) ,units=predefined_args["units"] ,embed_size=predefined_args["embed_size"] ,embed_dropout=predefined_args["embed_dropout"] ,word_embed=predefined_args["word_embed"] ,use_pooler=snake_case_ ,use_token_type_embed=snake_case_ ,token_type_vocab_size=predefined_args["token_type_vocab_size"] ,use_classifier=snake_case_ ,use_decoder=snake_case_ ,)
original_bort.load_parameters(snake_case_ ,cast_dtype=snake_case_ ,ignore_extra=snake_case_ )
lowercase_ :Optional[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowercase_ :Dict = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(snake_case_ ),
}
lowercase_ :Dict = BertConfig.from_dict(snake_case_ )
lowercase_ :Optional[int] = BertForMaskedLM(snake_case_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__lowerCamelCase : Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__lowerCamelCase : Optional[int] ,__lowerCamelCase : int ):
lowercase_ :Dict = hf_param.shape
lowercase_ :Tuple = to_torch(params[gluon_param] )
lowercase_ :Union[str, Any] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
lowercase_ :Optional[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,"word_embed.0.weight" )
lowercase_ :List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,"encoder.position_weight" )
lowercase_ :List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,"encoder.layer_norm.beta" )
lowercase_ :Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,"encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowercase_ :str = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowercase_ :BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowercase_ :BertSelfAttention = layer.attention.self
lowercase_ :Dict = check_and_map_params(
self_attn.key.bias.data ,F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
lowercase_ :Optional[int] = check_and_map_params(
self_attn.key.weight.data ,F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
lowercase_ :Tuple = check_and_map_params(
self_attn.query.bias.data ,F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
lowercase_ :int = check_and_map_params(
self_attn.query.weight.data ,F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
lowercase_ :Optional[Any] = check_and_map_params(
self_attn.value.bias.data ,F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
lowercase_ :List[Any] = check_and_map_params(
self_attn.value.weight.data ,F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
lowercase_ :BertSelfOutput = layer.attention.output
lowercase_ :Union[str, Any] = check_and_map_params(
self_output.dense.bias ,F'encoder.transformer_cells.{i}.proj.bias' )
lowercase_ :Any = check_and_map_params(
self_output.dense.weight ,F'encoder.transformer_cells.{i}.proj.weight' )
lowercase_ :Tuple = check_and_map_params(
self_output.LayerNorm.bias ,F'encoder.transformer_cells.{i}.layer_norm.beta' )
lowercase_ :str = check_and_map_params(
self_output.LayerNorm.weight ,F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
lowercase_ :BertIntermediate = layer.intermediate
lowercase_ :str = check_and_map_params(
intermediate.dense.bias ,F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
lowercase_ :List[str] = check_and_map_params(
intermediate.dense.weight ,F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
lowercase_ :BertOutput = layer.output
lowercase_ :List[Any] = check_and_map_params(
bert_output.dense.bias ,F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
lowercase_ :Dict = check_and_map_params(
bert_output.dense.weight ,F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
lowercase_ :List[Any] = check_and_map_params(
bert_output.LayerNorm.bias ,F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
lowercase_ :int = check_and_map_params(
bert_output.LayerNorm.weight ,F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowercase_ :int = RobertaTokenizer.from_pretrained("roberta-base" )
lowercase_ :Optional[int] = tokenizer.encode_plus(snake_case_ )['''input_ids''']
# Get gluon output
lowercase_ :str = mx.nd.array([input_ids] )
lowercase_ :Any = original_bort(inputs=snake_case_ ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(snake_case_ )
lowercase_ :List[Any] = BertModel.from_pretrained(snake_case_ )
hf_bort_model.eval()
lowercase_ :List[str] = tokenizer.encode_plus(snake_case_ ,return_tensors="pt" )
lowercase_ :Union[str, Any] = hf_bort_model(**snake_case_ )[0]
lowercase_ :str = output_gluon[0].asnumpy()
lowercase_ :Dict = output_hf[0].detach().numpy()
lowercase_ :str = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowercase_ :Optional[int] = np.allclose(snake_case_ ,snake_case_ ,atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" ,snake_case_ )
if __name__ == "__main__":
lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase : Tuple =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 223 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a_ = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def __lowercase ( snake_case_ : Any ,snake_case_ : int ,snake_case_ : List[str]=None ) ->Union[str, Any]:
'''simple docstring'''
if rng is None:
__A : Union[str, Any] = random.Random()
__A : List[str] = 1
for dim in shape:
total_dims *= dim
__A : List[str] = []
for _ in range(snake_case_ ):
values.append(rng.randint(0 ,vocab_size - 1 ) )
__A : Optional[Any] = np.array(snake_case_ ,dtype=jnp.intaa ).reshape(snake_case_ )
return output
def __lowercase ( snake_case_ : int ,snake_case_ : Union[str, Any]=None ) ->int:
'''simple docstring'''
__A : int = ids_tensor(snake_case_ ,vocab_size=2 ,rng=snake_case_ )
# make sure that at least one token is attended to for each batch
__A : str = 1
return attn_mask
@require_flax
class __snake_case :
"""simple docstring"""
_lowerCamelCase = None
_lowerCamelCase = ()
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__A : Optional[int] = 2
__A : Tuple = inputs['''input_ids'''].shape[-1] // 2
__A : List[Any] = inputs['''input_ids'''][:max_batch_size, :sequence_length]
__A : Optional[Any] = jnp.ones_like(__lowerCamelCase )
__A : List[Any] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__A : Optional[int] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__A : Optional[int] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : List[Any] = self._get_input_ids_and_config()
__A : List[Any] = False
__A : str = max_length
__A : Tuple = 0
for model_class in self.all_generative_model_classes:
__A : Union[str, Any] = model_class(__lowerCamelCase )
__A : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
__A : Dict = getattr(__lowerCamelCase , __lowerCamelCase )
__A : Tuple = pt_model_class(__lowerCamelCase ).eval()
__A : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params )
__A : str = flax_model.generate(__lowerCamelCase ).sequences
__A : List[Any] = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__A : Any = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Union[str, Any] = self._get_input_ids_and_config()
__A : List[str] = False
__A : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
__A : Optional[int] = model_class(__lowerCamelCase )
__A : Dict = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Union[str, Any] = jit(model.generate )
__A : str = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : int = self._get_input_ids_and_config()
__A : List[Any] = True
__A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
__A : Dict = model_class(__lowerCamelCase )
__A : List[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : List[Any] = jit(model.generate )
__A : List[str] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : List[Any] = self._get_input_ids_and_config()
__A : List[str] = False
__A : Any = max_length
__A : List[Any] = 2
for model_class in self.all_generative_model_classes:
__A : Tuple = model_class(__lowerCamelCase )
__A : Optional[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Union[str, Any] = jit(model.generate )
__A : Optional[int] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Tuple = self._get_input_ids_and_config()
__A : str = False
__A : Union[str, Any] = max_length
__A : List[Any] = 2
__A : Any = 2
for model_class in self.all_generative_model_classes:
__A : List[str] = model_class(__lowerCamelCase )
__A : Optional[int] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Any = self._get_input_ids_and_config()
__A : Optional[Any] = True
__A : Union[str, Any] = max_length
__A : List[str] = 0.8
__A : List[str] = 10
__A : Union[str, Any] = 0.3
__A : Union[str, Any] = 1
__A : Optional[Any] = 8
__A : Dict = 9
for model_class in self.all_generative_model_classes:
__A : List[Any] = model_class(__lowerCamelCase )
__A : List[str] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Any = jit(model.generate )
__A : str = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Union[str, Any] = self._get_input_ids_and_config()
__A : Union[str, Any] = max_length
__A : List[str] = 1
__A : str = 8
__A : Any = 9
for model_class in self.all_generative_model_classes:
__A : Union[str, Any] = model_class(__lowerCamelCase )
__A : Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Optional[Any] = jit(model.generate )
__A : Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : int = self._get_input_ids_and_config()
__A : Optional[int] = max_length
__A : List[str] = 2
__A : List[Any] = 1
__A : Optional[Any] = 8
__A : str = 9
for model_class in self.all_generative_model_classes:
__A : Optional[int] = model_class(__lowerCamelCase )
__A : int = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : str = jit(model.generate )
__A : Any = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : int = self._get_input_ids_and_config()
# pad attention mask on the left
__A : Dict = attention_mask.at[(0, 0)].set(0 )
__A : Optional[Any] = False
__A : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
__A : int = model_class(__lowerCamelCase )
__A : Union[str, Any] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : Optional[int] = jit(model.generate )
__A : Any = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
__A : str = attention_mask.at[(0, 0)].set(0 )
__A : List[Any] = True
__A : Any = max_length
for model_class in self.all_generative_model_classes:
__A : str = model_class(__lowerCamelCase )
__A : List[Any] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : List[str] = jit(model.generate )
__A : Optional[int] = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A , __A , __A , __A : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
__A : List[str] = attention_mask.at[(0, 0)].set(0 )
__A : Optional[int] = 2
__A : Dict = max_length
for model_class in self.all_generative_model_classes:
__A : Any = model_class(__lowerCamelCase )
__A : int = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
__A : str = jit(model.generate )
__A : str = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
__A : Optional[Any] = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__A : Any = '''Hello world'''
__A : Dict = tokenizer(__lowerCamelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCamelCase , '''do_samples''' ):
model.generate(__lowerCamelCase , do_samples=__lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCamelCase , '''foo''' ):
__A : Any = {'''foo''': '''bar'''}
model.generate(__lowerCamelCase , **__lowerCamelCase )
| 179 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableDiffusionSAGPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ = False
def lowercase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
a : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
a : Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
a : int = CLIPTextModel(__snake_case )
a : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase_ ( self : Optional[Any] , __snake_case : str , __snake_case : Tuple=0 ):
if str(__snake_case ).startswith('mps' ):
a : Dict = torch.manual_seed(__snake_case )
else:
a : int = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : List[Any] = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
a : Any = sag_pipe.to(__snake_case )
sag_pipe.set_progress_bar_config(disable=__snake_case )
a : Any = '.'
a : Optional[int] = torch.manual_seed(0 )
a : Optional[Any] = sag_pipe(
[prompt] , generator=__snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
a : Dict = output.images
a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a : Optional[int] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowercase_ ( self : int ):
a : str = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
a : List[Any] = sag_pipe.to(__snake_case )
sag_pipe.set_progress_bar_config(disable=__snake_case )
a : str = '.'
a : str = torch.manual_seed(0 )
a : Union[str, Any] = sag_pipe(
[prompt] , generator=__snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
a : Any = output.images
a : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a : Optional[int] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
a : Optional[int] = sag_pipe.to(__snake_case )
sag_pipe.set_progress_bar_config(disable=__snake_case )
a : int = '.'
a : Dict = torch.manual_seed(0 )
a : Any = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=__snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
a : List[str] = output.images
assert image.shape == (1, 5_12, 7_68, 3) | 353 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase: Any = logging.get_logger(__name__)
class a__( lowerCamelCase__ ):
lowercase__ = """encoder-decoder"""
lowercase__ = True
def __init__( self : Dict , **__snake_case : Union[str, Any] ):
super().__init__(**__snake_case )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
a : List[str] = kwargs.pop('encoder' )
a : Optional[Any] = encoder_config.pop('model_type' )
a : Tuple = kwargs.pop('decoder' )
a : Optional[int] = decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
a : Any = AutoConfig.for_model(__snake_case , **__snake_case )
a : Optional[int] = AutoConfig.for_model(__snake_case , **__snake_case )
a : Tuple = True
@classmethod
def lowercase_ ( cls : int , __snake_case : PretrainedConfig , __snake_case : PretrainedConfig , **__snake_case : Union[str, Any] ):
logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
a : List[Any] = True
a : Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__snake_case )
def lowercase_ ( self : List[Any] ):
a : int = copy.deepcopy(self.__dict__ )
a : List[str] = self.encoder.to_dict()
a : Optional[int] = self.decoder.to_dict()
a : Optional[Any] = self.__class__.model_type
return output | 96 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase( unittest.TestCase ):
lowercase_ : Dict = JukeboxTokenizer
lowercase_ : Dict = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
import torch
_lowercase : str = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
_lowercase : Optional[Any] = tokenizer(**self.metas)['input_ids']
# fmt: off
_lowercase : Optional[int] = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
torch.tensor([[0, 0, 0, 10_69, 11]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
def UpperCamelCase ( self) -> int:
"""simple docstring"""
import torch
_lowercase : List[str] = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
_lowercase : List[str] = tokenizer(**self.metas)['input_ids']
# fmt: off
_lowercase : Optional[int] = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| 21 |
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> float:
_lowercase : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
def UpperCamelCase_( ) -> Optional[int]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _UpperCAmelCase ( __lowerCamelCase : str ) -> YolosConfig:
_snake_case = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_snake_case = 1_92
_snake_case = 7_68
_snake_case = 12
_snake_case = 3
_snake_case = [8_00, 13_33]
_snake_case = False
elif yolos_name == "yolos_s_dWr":
_snake_case = 3_30
_snake_case = 14
_snake_case = 6
_snake_case = 13_20
elif "yolos_s" in yolos_name:
_snake_case = 3_84
_snake_case = 15_36
_snake_case = 12
_snake_case = 6
elif "yolos_b" in yolos_name:
_snake_case = [8_00, 13_44]
_snake_case = 91
_snake_case = '''huggingface/label-files'''
_snake_case = '''coco-detection-id2label.json'''
_snake_case = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
_snake_case = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( __lowerCamelCase : dict , __lowerCamelCase : YolosConfig , __lowerCamelCase : bool = False ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_snake_case = in_proj_weight[: config.hidden_size, :]
_snake_case = in_proj_bias[: config.hidden_size]
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_snake_case = in_proj_weight[-config.hidden_size :, :]
_snake_case = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( __lowerCamelCase : str ) -> str:
if "backbone" in name:
_snake_case = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
_snake_case = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
_snake_case = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
_snake_case = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
_snake_case = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
_snake_case = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
_snake_case = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
_snake_case = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_snake_case = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_snake_case = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_snake_case = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_snake_case = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_snake_case = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
_snake_case = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
_snake_case = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
_snake_case = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _UpperCAmelCase ( __lowerCamelCase : dict , __lowerCamelCase : YolosForObjectDetection ) -> dict:
    """Rewrite an original state dict into the HF YOLOS key layout.

    NOTE(review): this function looks badly damaged by mechanical renaming —
    both parameters share one name (a SyntaxError), ``orig_state_dict`` /
    ``model`` / ``key_split`` / ``layer_num`` / ``val`` / ``dim`` are used but
    never bound, and every computed slice is assigned to the same throwaway
    local instead of being written back into the dict. Recover the original
    implementation before relying on it.
    """
    for key in orig_state_dict.copy().keys():
        _snake_case = orig_state_dict.pop(__lowerCamelCase )
        if "qkv" in key:
            # fused qkv entry: split by hidden size into query/key/value parts
            _snake_case = key.split('''.''' )
            _snake_case = int(key_split[2] )
            # presumably the per-layer head size — TODO confirm against model
            _snake_case = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                _snake_case = val[:dim, :]
                _snake_case = val[
                    dim : dim * 2, :
                ]
                _snake_case = val[-dim:, :]
            else:
                _snake_case = val[:dim]
                _snake_case = val[dim : dim * 2]
                _snake_case = val[-dim:]
        else:
            _snake_case = val
    return orig_state_dict
def _UpperCAmelCase ( ):
    """Download and return the standard COCO validation image (two cats on a
    couch) used as a smoke-test input for the converted model.

    Returns:
        A ``PIL.Image.Image`` (the original annotated it as a tensor, which
        was incorrect — ``Image.open`` yields a PIL image).
    """
    # Bug fix: the body referenced an undefined obfuscated name both as the
    # URL and as the ``stream`` flag; the URL now lives in a local and
    # streaming is requested explicitly so ``.raw`` stays readable.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : bool = False ) -> Dict:
    """Convert an original YOLOS checkpoint, verify its outputs on a test
    image, save it, and optionally push it to the hub.

    NOTE(review): heavily damaged by mechanical renaming — all four parameters
    share one name (a SyntaxError), every result is bound to the same
    throwaway local, and ``yolos_name`` / ``model`` / ``image_processor`` /
    ``outputs`` / ``logits`` / ``pred_boxes`` / ``pytorch_dump_folder_path`` /
    ``push_to_hub`` / ``model_mapping`` are referenced but never bound.
    Recover the original implementation before use.
    """
    _snake_case = get_yolos_config(__lowerCamelCase )
    # load original state_dict
    _snake_case = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model''']
    # load 🤗 model
    _snake_case = YolosForObjectDetection(__lowerCamelCase )
    model.eval()
    _snake_case = convert_state_dict(__lowerCamelCase , __lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    # Check outputs on an image, prepared by YolosImageProcessor
    _snake_case = 8_00 if yolos_name != '''yolos_ti''' else 5_12
    _snake_case = YolosImageProcessor(format='''coco_detection''' , size=__lowerCamelCase )
    _snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' )
    _snake_case = model(**__lowerCamelCase )
    _snake_case , _snake_case = outputs.logits, outputs.pred_boxes
    # Per-variant reference slices used to validate the conversion below.
    _snake_case , _snake_case = None, None
    if yolos_name == "yolos_ti":
        _snake_case = torch.tensor(
            [[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
        _snake_case = torch.tensor(
            [[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
    elif yolos_name == "yolos_s_200_pre":
        _snake_case = torch.tensor(
            [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
        _snake_case = torch.tensor(
            [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
    elif yolos_name == "yolos_s_300_pre":
        _snake_case = torch.tensor(
            [[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
        _snake_case = torch.tensor(
            [[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
    elif yolos_name == "yolos_s_dWr":
        _snake_case = torch.tensor(
            [[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
        _snake_case = torch.tensor(
            [[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
    elif yolos_name == "yolos_base":
        _snake_case = torch.tensor(
            [[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
        _snake_case = torch.tensor(
            [[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
    else:
        raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
    assert torch.allclose(logits[0, :3, :3] , __lowerCamelCase , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , __lowerCamelCase , atol=1E-4 )
    Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
    print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__lowerCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        # Map internal checkpoint names to hub repository names.
        _snake_case = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }
        print('''Pushing to the hub...''' )
        _snake_case = model_mapping[yolos_name]
        image_processor.push_to_hub(__lowerCamelCase , organization='''hustvl''' )
        model.push_to_hub(__lowerCamelCase , organization='''hustvl''' )
if __name__ == "__main__":
    # Command-line entry point for the YOLOS conversion script.
    # NOTE(review): the parser is bound to an obfuscated name, so the later
    # references to ``parser`` / ``args`` / ``convert_yolos_checkpoint`` are
    # unresolved — verify against the original script.
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--yolos_name',
        default='yolos_s_200_pre',
        type=str,
        help=(
            'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
            ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
        ),
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 40 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : int ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 1 |
"""simple docstring"""
# Documentation-to-notebook conversion settings: an installation cell injected
# at the top of generated notebooks, and placeholder substitutions applied to
# code samples in the docs.
# NOTE(review): all three constants are bound to the same obfuscated name
# ``__A`` (each rebinding the previous), and ``INSTALL_CONTENT`` is referenced
# but never defined under that name — verify against the original module.
__A = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__A = [{"type": "code", "content": INSTALL_CONTENT}]
__A = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 148 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def UpperCamelCase__ ( num: int ) -> int:
    """Return ``num!`` (the factorial of a non-negative integer).

    Args:
        num: non-negative integer.

    Returns:
        ``num`` factorial; 1 for 0 and 1.

    Raises:
        ValueError: if ``num`` is negative.
    """
    # Bug fix: the body referenced ``num`` while the parameter carried an
    # obfuscated name (NameError on every call). Also switched from recursion
    # to iteration so large inputs cannot hit the recursion limit; results are
    # still memoized by ``lru_cache``.
    if num < 0:
        raise ValueError("Number should not be negative.")
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 148 | 1 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> list[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = [0 for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
# initialize interval's left pointer and right pointer
_UpperCAmelCase : Tuple = 0, 0
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
_UpperCAmelCase : Tuple = min(right_pointer - i + 1 , z_result[i - left_pointer] )
_UpperCAmelCase : List[str] = min_edge
while go_next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
_UpperCAmelCase : List[Any] = i, i + z_result[i] - 1
return z_result
def __snake_case ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : str ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(SCREAMING_SNAKE_CASE__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( pattern: str , input_str: str ) -> int:
    """Count (possibly overlapping) occurrences of ``pattern`` in
    ``input_str`` using the Z-function of their concatenation.

    NOTE(review): relies on a module-level ``z_function`` that is not defined
    under that name in this file — confirm the intended helper binding.
    """
    # Bug fix: both parameters shared one obfuscated name, which is a
    # SyntaxError; names restored from how the body uses them.
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 362 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __snake_case ( token , num_runs=7 ):
    """Return the most recent scheduled runs of the transformers daily-CI
    workflow from the GitHub Actions API.

    Args:
        token: optional GitHub token for authenticated requests.
        num_runs: number of runs to request (default 7).

    Returns:
        The ``workflow_runs`` list from the API response.
    """
    # Bug fix: both parameters shared one obfuscated name, which is a
    # SyntaxError; names restored from how the body uses them.
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
    """Return the id of the most recent *completed* daily-CI workflow run.

    NOTE(review): damaged by mechanical renaming — ``get_daily_ci_runs`` is
    not defined under that name here, and the results are bound to a throwaway
    local so ``workflow_runs`` / ``workflow_run_id`` are unresolved. Recover
    the original bindings before use.
    """
    _UpperCAmelCase : int = get_daily_ci_runs(SCREAMING_SNAKE_CASE__ )
    _UpperCAmelCase : Optional[int] = None
    for workflow_run in workflow_runs:
        # first completed run in the (most-recent-first) list wins
        if workflow_run["status"] == "completed":
            _UpperCAmelCase : str = workflow_run["id"]
            break
    return workflow_run_id
def __snake_case ( artifact_names , output_dir , token ):
    """Download the named artifacts of the last completed daily-CI run into
    ``output_dir``.

    NOTE(review): relies on module-level ``get_last_daily_ci_runs`` which is
    not defined under that name in this file — confirm the intended helper.

    Args:
        artifact_names: iterable of artifact names to fetch.
        output_dir: directory the artifact zips are written to.
        token: GitHub token forwarded to the API helpers.
    """
    # Bug fix: the three parameters all shared one obfuscated name, which is a
    # SyntaxError; names restored from how the body uses them.
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the helper's keyword name.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def __snake_case ( artifact_names , output_dir , token ):
    """Download the last daily-CI artifacts and return their text contents.

    Returns a mapping ``{artifact_name: {member_filename: text}}`` for every
    artifact zip found in ``output_dir`` after downloading.
    """
    # Bug fixes: the three parameters all shared one obfuscated name (a
    # SyntaxError); the zip-member checks and reads used the wrong variable;
    # extracted text was discarded instead of stored; the archive is now
    # managed with context managers so handles are always closed.
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'{artifact_name}.zip')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 202 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# Checkpoint-variant selector for the T5X spectrogram-diffusion conversion.
# NOTE(review): the argparse default below interpolates a name ``MODEL`` which
# is not bound here (this constant got an obfuscated name) — verify.
__lowercase = '''base_with_context'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Copy T5X notes-encoder weights into the PyTorch encoder module.

    NOTE(review): damaged by mechanical renaming — both parameters share one
    name (a SyntaxError), ``weights`` / ``model`` / ``ly_weight`` /
    ``attention_weights`` / ``__snake_case`` are unresolved, and every
    converted tensor is assigned to the same throwaway local instead of a
    module attribute, so the model is returned unmodified. Recover the
    original attribute targets before use.
    """
    __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
    __UpperCamelCase :str = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__snake_case )
    for lyr_num, lyr in enumerate(model.encoders ):
        __UpperCamelCase :str = weights[f"""layers_{lyr_num}"""]
        __UpperCamelCase :Optional[int] = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Tuple = ly_weight['''attention''']
        __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :int = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    __UpperCamelCase :List[str] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Copy T5X continuous-encoder weights into the PyTorch encoder module.

    NOTE(review): damaged by mechanical renaming, same pattern as the notes
    encoder loader above — duplicate parameter names (SyntaxError) and all
    converted tensors discarded into one throwaway local. Recover the
    original attribute targets before use.
    """
    __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    __UpperCamelCase :Dict = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__snake_case )
    for lyr_num, lyr in enumerate(model.encoders ):
        __UpperCamelCase :int = weights[f"""layers_{lyr_num}"""]
        __UpperCamelCase :str = ly_weight['''attention''']
        __UpperCamelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __UpperCamelCase :int = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :str = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
        __UpperCamelCase :Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
    __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Copy T5X FiLM-decoder weights into the PyTorch decoder module.

    NOTE(review): damaged by mechanical renaming, same pattern as the encoder
    loaders above — duplicate parameter names (SyntaxError) and all converted
    tensors discarded into one throwaway local. Recover the original
    attribute targets before use.
    """
    __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
    __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    __UpperCamelCase :Dict = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__snake_case )
    __UpperCamelCase :Dict = nn.Parameter(
        torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        __UpperCamelCase :Union[str, Any] = weights[f"""layers_{lyr_num}"""]
        __UpperCamelCase :Union[str, Any] = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        # self-attention weights
        __UpperCamelCase :Union[str, Any] = ly_weight['''self_attention''']
        __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :int = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :int = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        # cross-attention weights
        __UpperCamelCase :Any = ly_weight['''MultiHeadDotProductAttention_0''']
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        __UpperCamelCase :str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        __UpperCamelCase :Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        __UpperCamelCase :int = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
        __UpperCamelCase :Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        __UpperCamelCase :Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    __UpperCamelCase :Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
    __UpperCamelCase :List[Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
    return model
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    """Convert a T5X spectrogram-diffusion checkpoint into a diffusers
    ``SpectrogramDiffusionPipeline`` and optionally save it.

    NOTE(review): damaged by mechanical renaming — every intermediate is
    bound to the same throwaway local, and ``args`` / ``ta_checkpoint`` /
    ``synth_model`` / the ``load_*`` helpers / ``pipe`` are referenced under
    names that are never bound here. Recover the original bindings before use.
    """
    __UpperCamelCase :List[str] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    __UpperCamelCase :List[str] = jnp.tree_util.tree_map(onp.array , __snake_case )
    # gin overrides applied when parsing the training config
    __UpperCamelCase :str = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    __UpperCamelCase :List[Any] = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    __UpperCamelCase :Union[str, Any] = inference.parse_training_gin_file(__snake_case , __snake_case )
    __UpperCamelCase :Optional[Any] = inference.InferenceModel(args.checkpoint_path , __snake_case )
    __UpperCamelCase :Optional[Any] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    __UpperCamelCase :Tuple = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    __UpperCamelCase :List[str] = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    __UpperCamelCase :List[Any] = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    __UpperCamelCase :List[str] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , __snake_case )
    __UpperCamelCase :Optional[Any] = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , __snake_case )
    __UpperCamelCase :Optional[Any] = load_decoder(ta_checkpoint['''target''']['''decoder'''] , __snake_case )
    __UpperCamelCase :Union[str, Any] = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    __UpperCamelCase :Union[str, Any] = SpectrogramDiffusionPipeline(
        notes_encoder=__snake_case , continuous_encoder=__snake_case , decoder=__snake_case , scheduler=__snake_case , melgan=__snake_case , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # Command-line entry point for the spectrogram-diffusion conversion.
    # NOTE(review): the parser is bound to an obfuscated name, so ``parser`` /
    # ``args`` / ``main`` / ``MODEL`` below are unresolved — verify against
    # the original script.
    __lowercase = argparse.ArgumentParser()
    parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
    parser.add_argument(
        '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
    )
    parser.add_argument(
        '''--checkpoint_path''',
        default=F'{MODEL}/checkpoint_500000',
        type=str,
        required=False,
        help='''Path to the original jax model checkpoint.''',
    )
    __lowercase = parser.parse_args()
    main(args)
| 43 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowerCamelCase__ ( ksize , sigma , theta , lambd , gamma , psi ) -> np.ndarray:
    """Return a ``ksize`` x ``ksize`` Gabor filter kernel.

    Args:
        ksize: kernel size; bumped to the next odd number when even.
        sigma: standard deviation of the Gaussian envelope.
        theta: orientation of the filter, in degrees.
        lambd: wavelength of the sinusoidal factor.
        gamma: spatial aspect ratio.
        psi: phase offset, in radians.

    Returns:
        A float64 numpy array holding the kernel values.
    """
    # Bug fixes: all six parameters shared one obfuscated name (a SyntaxError);
    # every intermediate overwrote the same throwaway variable so the kernel
    # was never filled; ``np.floataa`` does not exist; and the return referred
    # to the undefined name ``gabor``.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: apply Gabor kernels at six orientations and display the result.
    # NOTE(review): damaged by mechanical renaming — every intermediate is
    # bound to ``_a`` while the code reads ``img`` / ``gray`` / ``out`` /
    # ``kernel_aa``, and ``filteraD`` / ``np.uinta`` look like mangled
    # ``filter2D`` / ``np.uint8`` — verify against the original script.
    # read original image
    _a = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    _a = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    _a = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        _a = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    _a = out / out.max() * 255
    _a = out.astype(np.uinta)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 194 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-module wiring for the ConvBERT model family: the base import structure
# always exposes the configuration and slow tokenizer, and the optional
# tokenizers / torch / tf sections are appended only when their backends are
# installed.
# NOTE(review): mechanical renaming bound every optional-section list to
# ``__snake_case`` instead of extending ``_import_structure``, so the
# ``_LazyModule`` call at the bottom only ever sees the base dict — verify
# against the original module.
__snake_case : Union[str, Any] = {
    """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
    """tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : List[str] = ["""ConvBertTokenizerFast"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Optional[Any] = [
        """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvBertForMaskedLM""",
        """ConvBertForMultipleChoice""",
        """ConvBertForQuestionAnswering""",
        """ConvBertForSequenceClassification""",
        """ConvBertForTokenClassification""",
        """ConvBertLayer""",
        """ConvBertModel""",
        """ConvBertPreTrainedModel""",
        """load_tf_weights_in_convbert""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __snake_case : Union[str, Any] = [
        """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFConvBertForMaskedLM""",
        """TFConvBertForMultipleChoice""",
        """TFConvBertForQuestionAnswering""",
        """TFConvBertForSequenceClassification""",
        """TFConvBertForTokenClassification""",
        """TFConvBertLayer""",
        """TFConvBertModel""",
        """TFConvBertPreTrainedModel""",
    ]
# Static type checkers get the real imports; at runtime the module is replaced
# by a _LazyModule that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys
    __snake_case : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """Helper/tester that holds image-processor settings and computes the
    resize dimensions the processor is expected to produce.

    NOTE(review): damaged by mechanical renaming — ``__init__``'s parameters
    all share one name (a SyntaxError) and tuple unpackings are collapsed onto
    one local, leaving ``w`` / ``h`` / ``expected_values`` etc. unbound.
    Recover the original bindings before use.
    """
    def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=30 , _UpperCamelCase=4_00 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=True , _UpperCamelCase=1 / 2_55 , _UpperCamelCase=True , ):
        """Store the image-processing settings used by the test cases."""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        lowerCAmelCase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = num_channels
        lowerCAmelCase__ = min_resolution
        lowerCAmelCase__ = max_resolution
        lowerCAmelCase__ = do_resize
        lowerCAmelCase__ = size
        lowerCAmelCase__ = do_normalize
        lowerCAmelCase__ = image_mean
        lowerCAmelCase__ = image_std
        lowerCAmelCase__ = do_rescale
        lowerCAmelCase__ = rescale_factor
        lowerCAmelCase__ = do_pad
    def UpperCamelCase__ ( self ):
        """Return the settings as the kwargs dict for the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=False ):
        """Compute the (height, width) the processor should resize inputs to,
        following the shortest-edge rule; for batches, the max over images.
        """
        if not batched:
            lowerCAmelCase__ = image_inputs[0]
            if isinstance(_UpperCamelCase , Image.Image ):
                lowerCAmelCase__ , lowerCAmelCase__ = image.size
            else:
                lowerCAmelCase__ , lowerCAmelCase__ = image.shape[1], image.shape[2]
            if w < h:
                lowerCAmelCase__ = int(self.size['shortest_edge'] * h / w )
                lowerCAmelCase__ = self.size['shortest_edge']
            elif w > h:
                lowerCAmelCase__ = self.size['shortest_edge']
                lowerCAmelCase__ = int(self.size['shortest_edge'] * w / h )
            else:
                lowerCAmelCase__ = self.size['shortest_edge']
                lowerCAmelCase__ = self.size['shortest_edge']
        else:
            lowerCAmelCase__ = []
            for image in image_inputs:
                lowerCAmelCase__ , lowerCAmelCase__ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCAmelCase__ = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
            lowerCAmelCase__ = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
    """Tests for ``DeformableDetrImageProcessor``.

    NOTE(review): identifiers in this class look mechanically renamed — every
    test method shares the name ``UpperCamelCase__`` (later definitions shadow
    earlier ones, so unittest only discovers the last), locals are bound to
    ``lowerCAmelCase__`` but read back via other names (``image_processor``,
    ``encoded_images``, ``encoding`` …), and the class attribute below is
    presumably ``image_processing_class``.  The docstrings describe the
    apparent intent of each method; confirm against the original test module.
    """

    # Presumably ``image_processing_class`` — the methods read it via that name.
    _SCREAMING_SNAKE_CASE : Dict = DeformableDetrImageProcessor if is_vision_available() else None

    def UpperCamelCase__ ( self ):
        """setUp: build the tester object that supplies processor kwargs and expected sizes."""
        # NOTE(review): result bound to a local, not to ``self.image_processor_tester`` — confirm.
        lowerCAmelCase__ = DeformableDetrImageProcessingTester(self )

    @property
    def UpperCamelCase__ ( self ):
        """Constructor kwargs for the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ ( self ):
        """The processor exposes every documented configuration attribute."""
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , 'image_mean' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'image_std' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_rescale' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_pad' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )

    def UpperCamelCase__ ( self ):
        """``from_dict`` honours defaults and explicit size/max_size overrides."""
        lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
        self.assertEqual(image_processor.do_pad , _UpperCamelCase )
        # Overriding size/max_size must be reflected in the resulting config.
        lowerCAmelCase__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCamelCase )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , _UpperCamelCase )

    def UpperCamelCase__ ( self ):
        """Intentionally empty."""
        pass

    def UpperCamelCase__ ( self ):
        """PIL inputs: processed pixel values match the tester's expected shapes."""
        # Initialize image_processing
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
        lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCamelCase__ ( self ):
        """NumPy inputs: same shape checks as the PIL variant."""
        # Initialize image_processing
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , np.ndarray )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCamelCase__ ( self ):
        """PyTorch tensor inputs: same shape checks as the PIL variant."""
        # Initialize image_processing
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def UpperCamelCase__ ( self ):
        """Integration: COCO *detection* annotations are converted into DETR-style targets.

        Verifies pixel values, areas, boxes, image_id, iscrowd, class labels,
        orig_size and size against values recorded from a reference run.
        """
        # prepare image and target
        lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ = json.loads(f.read() )
        lowerCAmelCase__ = {'image_id': 3_97_69, 'annotations': target}
        # encode them
        lowerCAmelCase__ = DeformableDetrImageProcessor()
        lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
        lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
        # verify boxes
        lowerCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
        lowerCAmelCase__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
        # verify is_crowd
        lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
        # verify class_labels
        lowerCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
        # verify orig_size
        lowerCAmelCase__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
        # verify size
        lowerCAmelCase__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )

    @slow
    def UpperCamelCase__ ( self ):
        """Integration: COCO *panoptic* annotations (with masks_path) are converted correctly.

        Same checks as the detection variant, plus the summed mask pixel count.
        """
        # prepare image, target and masks_path
        lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ = json.loads(f.read() )
        lowerCAmelCase__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
        lowerCAmelCase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        lowerCAmelCase__ = DeformableDetrImageProcessor(format='coco_panoptic' )
        lowerCAmelCase__ = image_processing(images=_UpperCamelCase , annotations=_UpperCamelCase , masks_path=_UpperCamelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['pixel_values'].shape , _UpperCamelCase )
        lowerCAmelCase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _UpperCamelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _UpperCamelCase ) )
        # verify boxes
        lowerCAmelCase__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , _UpperCamelCase )
        lowerCAmelCase__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _UpperCamelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _UpperCamelCase ) )
        # verify is_crowd
        lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _UpperCamelCase ) )
        # verify class_labels
        lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _UpperCamelCase ) )
        # verify masks
        lowerCAmelCase__ = 82_28_73
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _UpperCamelCase )
        # verify orig_size
        lowerCAmelCase__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _UpperCamelCase ) )
        # verify size
        lowerCAmelCase__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _UpperCamelCase ) )
| 122 | 1 |
def lowerCamelCase__ ( _A = 4000000 ):
    """Project Euler 2: sum of the even-valued Fibonacci numbers not exceeding ``_A``.

    Args:
        _A: inclusive upper bound on Fibonacci values (default 4,000,000).

    Returns:
        int: the sum of every even Fibonacci number <= ``_A``.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= _A:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{lowerCamelCase__() = }")
| 187 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase (_a , unittest.TestCase ):
    """Unit tests for ``BioGptTokenizer`` (Moses tokenization + BPE).

    NOTE(review): identifiers look mechanically renamed — the two ``_lowercase``
    class attributes shadow each other (presumably ``tokenizer_class`` and
    ``test_rust_tokenizer``), every test method shares the name ``snake_case_``
    (so unittest only discovers the last definition), and several statements
    reference ``A_`` where it is not defined in scope.  Docstrings below
    describe the apparent intent; confirm against the original test module.
    """

    _lowercase = BioGptTokenizer
    _lowercase = False

    def snake_case_ ( self: Any ):
        """setUp: write a toy BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __UpperCamelCase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        # token -> id mapping; NOTE(review): ``A_`` is undefined here — looks
        # like a mangled reference to the vocab list above; confirm.
        __UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
        __UpperCamelCase = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
        __UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file,'w' ) as fp:
            fp.write(json.dumps(A_ ) )
        with open(self.merges_file,'w' ) as fp:
            fp.write('\n'.join(A_ ) )

    def snake_case_ ( self: Optional[int],A_: List[Any] ):
        """Return an (input, expected-output) text pair for round-trip checks."""
        __UpperCamelCase = 'lower newer'
        __UpperCamelCase = 'lower newer'
        return input_text, output_text

    def snake_case_ ( self: Any ):
        """Tokenize with a locally-built tokenizer; check BPE splits and ids."""
        __UpperCamelCase = BioGptTokenizer(self.vocab_file,self.merges_file )
        __UpperCamelCase = 'lower'
        __UpperCamelCase = ['low', 'er</w>']
        __UpperCamelCase = tokenizer.tokenize(A_ )
        self.assertListEqual(A_,A_ )
        __UpperCamelCase = tokens + ['<unk>']
        __UpperCamelCase = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ),A_ )

    @slow
    def snake_case_ ( self: Dict ):
        """Special-token assembly against the released microsoft/biogpt checkpoint."""
        __UpperCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        __UpperCamelCase = tokenizer.encode('sequence builders',add_special_tokens=A_ )
        __UpperCamelCase = tokenizer.encode('multi-sequence build',add_special_tokens=A_ )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_ )
        __UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A_,A_ )
        # BioGPT prepends </s> (id 2) to single sequences and between pairs.
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 310 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The original block bound every module constant to the same throwaway name
# (overwriting each other), leaving VOCAB_FILES_NAMES / logger undefined for
# the tokenizer class below.  Restored to the names actually referenced.
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    # Slow (SentencePiece-backed) tokenizer is unavailable without sentencepiece.
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

# File names expected for a serialized CamemBERT tokenizer.
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

# Download locations of the pretrained vocabulary/tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}

# Maximum sequence length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '▁'
class __lowercase (PreTrainedTokenizerFast ):
    """Fast CamemBERT tokenizer (backed by HuggingFace *tokenizers*), using BPE
    over a SentencePiece vocabulary.

    The original block was unrunnable: every method signature repeated the
    parameter name ``A`` (a SyntaxError) and the class attributes all shadowed
    one another.  Signatures/attributes are restored per the
    PreTrainedTokenizerFast contract; behavior otherwise unchanged.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ) -> Any:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Without the original SentencePiece file we cannot rebuild a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair = None) -> List[int]:
        """Add CamemBERT special tokens: <s> x </s> (pair: <s> a </s></s> b </s>)."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair = None) -> List[int]:
        """CamemBERT does not use token types: return an all-zero mask of full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep ) * [0]

    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        """Copy the SentencePiece model into *save_directory*; return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 176 |
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    """Project Euler 40: product of digits d1*d10*d100*...*d1000000 of
    Champernowne's constant (0.123456789101112...).

    Returns:
        int: the product of the seven selected digits (210).
    """
    digits = []
    total_length = 0
    index = 1
    # Concatenate 1, 2, 3, ... until at least one million digits are collected.
    # (Tracking the digit count directly avoids appending far more numbers
    # than needed.)
    while total_length < 10**6:
        chunk = str(index)
        digits.append(chunk)
        total_length += len(chunk)
        index += 1
    constant = "".join(digits)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE__())
| 176 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A = 10 , __A = 22 ) -> int:
'''simple docstring'''
UpperCamelCase__ = range(1 , __A )
UpperCamelCase__ = range(1 , __A )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(1_0, 2_2) = }""")
| 80 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Graph of transition probabilities for a Markov-chain random walk.

    The original class never assigned ``self.connections`` (locals were bound
    instead) and gave every method the same name; restored to the names the
    driver function below actually calls.
    """

    def __init__(self: Tuple) -> None:
        # node -> {destination node -> transition probability}
        self.connections = {}

    def add_node(self: Dict, node: str) -> None:
        """Register *node* with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self: Optional[int], nodea: str, nodea_dest: str, probability: float) -> None:
        """Record the probability of moving from *nodea* to *nodea_dest*,
        creating either node on first sight."""
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodea_dest not in self.connections:
            self.add_node(nodea_dest)
        self.connections[nodea][nodea_dest] = probability

    def get_nodes(self: List[Any]) -> list[str]:
        """Return all known node names."""
        return list(self.connections)

    def transition(self: Any, node: str) -> str:
        """Sample the next node from *node*'s outgoing distribution.

        Returns "" when the cumulative probability never exceeds the drawn
        value (i.e. outgoing probabilities sum to less than the sample).
        """
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def A_ ( start, transitions, steps ):
    """Simulate *steps* transitions from *start*; return per-node visit counts.

    Args:
        start: name of the starting node.
        transitions: iterable of (from_node, to_node, probability) triples.
        steps: number of transitions to simulate.

    Returns:
        collections.Counter: visit count per node (every node starts at 1).
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for from_node, to_node, probability in transitions:
        graph.add_transition_probability(from_node, to_node, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 66 | 0 |
def lowerCAmelCase_ ( k , n ):
    """Return the k-th (0-indexed) lexicographic permutation of ``range(n)``.

    Uses the factorial number system: each "digit" of ``k`` selects the next
    element from the pool of remaining elements.  (The original signature
    declared the same parameter name twice — a SyntaxError; the body's use of
    ``k`` and ``n`` pins the intended names.)

    Args:
        k: permutation index, ``0 <= k < n!``.
        n: number of elements to permute.

    Returns:
        list[int]: the k-th permutation of ``0 .. n-1``.
    """
    # Factorials from 1! up to (n - 1)!.
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    if n == 1:
        # Single-element edge case: the selection loop below assumes n >= 2.
        return [0]
    permutation = []
    elements = list(range(n ) )
    # Find permutation: successive divmods peel off factorial-base digits.
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 370 |
from scipy.stats import spearmanr
import datasets
_snake_case : Optional[Any] = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_snake_case : Dict = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
_snake_case : Optional[Any] = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a (datasets.Metric ):
    """Spearman rank-order correlation metric backed by ``scipy.stats.spearmanr``.

    The original block had a triplicate parameter name (a SyntaxError), bound
    results to throwaway locals, and gave both methods one shared private name;
    restored to the ``datasets.Metric`` ``_info``/``_compute`` contract.
    """

    def _info(self):
        """Declare metric metadata: paired float columns plus the scipy reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return ``{"spearmanr": r}``, plus the p-value when requested.

        ``scipy.stats.spearmanr`` returns a (correlation, pvalue) pair.
        """
        results = spearmanr(predictions, references)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 134 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture used by the tokenizer tests below.
# NOTE(review): the binding name looks mangled — the test class refers to its
# sample vocab through a different identifier; confirm against the original file.
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Tests for the slow and fast XGLM tokenizers.

    NOTE(review): identifiers look mechanically renamed — the four
    ``UpperCAmelCase`` class attributes shadow each other (presumably
    ``tokenizer_class`` / ``rust_tokenizer_class`` / ``test_slow_tokenizer`` /
    ``test_rust_tokenizer``), every method shares the name ``__snake_case``
    (name-mangled and shadowed, so unittest only sees the last definition),
    and ``__UpperCAmelCase`` stands in for many distinct locals (including the
    module-level sample-vocab path).  Docstrings describe the apparent intent;
    confirm against the original test module.
    """

    UpperCAmelCase : str = XGLMTokenizer
    UpperCAmelCase : Dict = XGLMTokenizerFast
    UpperCAmelCase : Optional[Any] = True
    UpperCAmelCase : Any = True

    def __snake_case ( self : int):
        """setUp: build a tokenizer from the SentencePiece fixture and save it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        a : Optional[int] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    def __snake_case ( self : str):
        """Token <-> id round trip for the pad token (id 1)."""
        a : str = "<pad>"
        a : str = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase)

    def __snake_case ( self : List[str]):
        """First vocab entries and total vocab size (1008)."""
        a : int = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<s>")
        self.assertEqual(vocab_keys[1] , "<pad>")
        self.assertEqual(len(__UpperCAmelCase) , 1008)

    def __snake_case ( self : int):
        """vocab_size property matches the fixture size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1008)

    def __snake_case ( self : Union[str, Any]):
        """Full tokenization: pieces, ids (with fairseq offset) and back again."""
        a : Optional[int] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
        a : Optional[int] = tokenizer.tokenize("This is a test")
        self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        a : str = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            __UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        a : int = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
        self.assertListEqual(
            __UpperCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        # ids -> tokens: out-of-vocab pieces come back as <unk>.
        a : int = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
        self.assertListEqual(
            __UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def __snake_case ( self : str):
        """Tokenizer loaded from the released facebook/xglm-564M checkpoint."""
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def __snake_case ( self : Optional[Any]):
        """A tokenizer built from a copied model file survives pickling."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(__UpperCAmelCase , f.name)
            a : List[str] = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase)
            a : str = pickle.dumps(__UpperCAmelCase)
        pickle.loads(__UpperCAmelCase)

    def __snake_case ( self : List[str]):
        """Slow and rust tokenizers agree on tokenize/encode output."""
        if not self.test_rust_tokenizer:
            return
        a : Dict = self.get_tokenizer()
        a : Any = self.get_rust_tokenizer()
        a : Optional[Any] = "I was born in 92000, and this is falsé."
        a : Any = tokenizer.tokenize(__UpperCAmelCase)
        a : Optional[Any] = rust_tokenizer.tokenize(__UpperCAmelCase)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
        a : Optional[int] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
        a : Optional[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
        a : str = self.get_rust_tokenizer()
        a : int = tokenizer.encode(__UpperCAmelCase)
        a : str = rust_tokenizer.encode(__UpperCAmelCase)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)

    @slow
    def __snake_case ( self : Optional[int]):
        """Integration: expected ids for a short input on the released checkpoint."""
        a : Tuple = "Hello World!"
        a : Union[str, Any] = [2, 31227, 4447, 35]
        self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase))

    @slow
    def __snake_case ( self : Any):
        """Integration: long text with unusual characters and unknown words."""
        a : Optional[Any] = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        a : str = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase))

    @slow
    def __snake_case ( self : Optional[Any]):
        """Integration: full expected encoding (ids + attention mask) for a batch."""
        # fmt: off
        a : List[Any] = {
            "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name="facebook/xglm-564M" , padding=__UpperCAmelCase , )
| 40 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( _a ):
    """Agent tool that answers a natural-language question about a document
    image using the Donut (VisionEncoderDecoder) DocVQA checkpoint.

    The original block was unrunnable: ``__init__`` declared ``*args`` and
    ``**kwargs`` under the same name (a SyntaxError), the configuration
    attributes all shadowed one another, and the three pipeline methods shared
    a single mangled name.  Restored per the PipelineTool contract
    (``encode``/``forward``/``decode``); NOTE(review): base class ``_a`` is
    defined elsewhere in the file — presumably ``PipelineTool``; confirm.
    """

    # PipelineTool configuration: checkpoint, user-facing description/name,
    # processor/model classes, and declared input/output modalities.
    default_checkpoint = """naver-clova-ix/donut-base-finetuned-docvqa"""
    description = (
        """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    name = """document_qa"""
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["""image""", """text"""]
    outputs = ["""text"""]

    def __init__(self, *args, **kwargs):
        """Fail fast when Pillow is missing — the tool operates on PIL images."""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build the Donut DocVQA prompt and preprocess the document image."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Autoregressively generate the answer token sequence on the tool's device."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and prompt markup, then parse Donut output to JSON."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # NOTE(review): the obfuscated source read ``tokenajson`` — DonutProcessor's
        # method is ``token2json``; confirm against the processor API.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 40 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ :
def __init__( self , _A , _A=1_3 , _A=3_2 , _A=3 , _A=4 , _A=[1_0, 2_0, 3_0, 4_0] , _A=[2, 2, 3, 2] , _A=True , _A=True , _A=3_7 , _A="gelu" , _A=1_0 , _A=0.02 , _A=["stage2", "stage3", "stage4"] , _A=[2, 3, 4] , _A=None , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_stages
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = out_features
__lowerCAmelCase = out_indices
__lowerCAmelCase = scope
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = ConvNextVaModel(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = ConvNextVaForImageClassification(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = ConvNextVaBackbone(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase = None
__lowerCAmelCase = ConvNextVaBackbone(config=_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(_A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_a : Optional[int] = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_a : Optional[int] = False
_a : Dict = False
_a : Tuple = False
_a : List[str] = False
_a : Union[str, Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ConvNextVaModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
__lowerCAmelCase = True
if model_class.__name__ in [
*get_values(_A ),
*get_values(_A ),
]:
continue
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
__lowerCAmelCase = False
__lowerCAmelCase = True
if (
model_class.__name__
in [*get_values(_A ), *get_values(_A )]
or not model_class.supports_gradient_checkpointing
):
continue
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.gradient_checkpointing_enable()
model.train()
__lowerCAmelCase = self._prepare_for_class(_A , _A , return_labels=_A )
__lowerCAmelCase = model(**_A ).loss
loss.backward()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
def check_hidden_states_output(_A , _A , _A ):
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(_A , _A ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(_A , _A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = ConvNextVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _a():
    """Load the standard COCO cats fixture image used by the slow integration tests.

    Fix: the original bound the image to a mangled local (`__lowerCAmelCase`)
    but returned the undefined name `image`, raising NameError on every call.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
    """Slow integration test: run a pretrained tiny ConvNeXt-V2 classifier on a fixture image."""

    @cached_property
    def __SCREAMING_SNAKE_CASE( self ):
        """Return the pretrained image processor, or None when vision deps are unavailable."""
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
    @slow
    def __SCREAMING_SNAKE_CASE( self ):
        """Classify the COCO fixture image and compare the first logits to reference values.

        NOTE(review): this method shares its mangled name with the cached property
        above, so the property is shadowed; and the mangled locals (`__lowerCAmelCase`)
        never bind the names the body reads (`model`, `preprocessor`, `outputs`, `_A`),
        so the test cannot run until the original variable names are restored.
        """
        __lowerCAmelCase = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(_A )
        __lowerCAmelCase = self.default_image_processor
        __lowerCAmelCase = prepare_img()
        __lowerCAmelCase = preprocessor(images=_A , return_tensors="pt" ).to(_A )
        # forward pass
        with torch.no_grad():
            __lowerCAmelCase = model(**_A )
        # verify the logits
        __lowerCAmelCase = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , _A )
        __lowerCAmelCase = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 102 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def _a ( SCREAMING_SNAKE_CASE_ : Any ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def _a(state_dict, codebook_state_dict):
    """Map an original FLAVA checkpoint (plus converted DALL-E codebook) onto
    Hugging Face ``FlavaForPreTraining`` parameter names.

    Duplicated embedding tensors are skipped, head/projection key prefixes are
    rewritten, values are upcast to float32, and codebook weights are added
    under the ``image_codebook.`` prefix (per the upstream conversion script).

    Fix: the original definition was a SyntaxError (both parameters shared one
    mangled name), every renamed key was discarded into a single mangled local,
    and the returned ``upgrade`` dict was never populated.
    """
    upgrade = {}
    for key, value in state_dict.items():
        # encoder.embeddings are double copied in original FLAVA
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
# NOTE(review): this function is unrunnable as written — all four parameters share
# the mangled name `SCREAMING_SNAKE_CASE_` (a SyntaxError in Python) and every local
# assignment targets the single mangled name `__lowerCAmelCase`, so the later reads
# of `config_path`, `hf_model`, and the converted state dicts are unbound. It also
# calls `upgrade_state_dict` / `count_parameters`, which are defined in this file
# under the mangled name `_a`. Intended flow (confirm against the upstream FLAVA
# conversion script): build/load a FlavaConfig, instantiate FlavaForPreTraining,
# convert the DALL-E codebook, load + upgrade the original state dict, sanity-check
# total parameter counts, then save the HF model.
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int=None ):
    if config_path is not None:
        # custom config supplied via --config_path; otherwise defaults below
        __lowerCAmelCase = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
    else:
        __lowerCAmelCase = FlavaConfig()
    __lowerCAmelCase = FlavaForPreTraining(SCREAMING_SNAKE_CASE_ ).eval()
    # convert the DALL-E codebook in memory (presumably save_checkpoint=False — confirm)
    __lowerCAmelCase = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_checkpoint=SCREAMING_SNAKE_CASE_ )
    if os.path.exists(SCREAMING_SNAKE_CASE_ ):
        # local checkpoint file
        __lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
    else:
        # otherwise treat the argument as a URL
        __lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
    __lowerCAmelCase = upgrade_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = hf_model.state_dict()
    __lowerCAmelCase = count_parameters(SCREAMING_SNAKE_CASE_ )
    __lowerCAmelCase = count_parameters(SCREAMING_SNAKE_CASE_ ) + count_parameters(SCREAMING_SNAKE_CASE_ )
    # converted and original parameter totals must agree within float tolerance
    assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
    hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
    # NOTE(review): the mangled assignments below bind `UpperCamelCase__` twice, yet
    # the following lines read `parser` and `args`, and the final call targets
    # `convert_flava_checkpoint` — none of which are defined in this file, so running
    # this script raises NameError. Restore `parser = ...` / `args = ...` and the
    # real entry-point name before use.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
    parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    UpperCamelCase__ = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 102 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _SCREAMING_SNAKE_CASE(config_name, save_dir, **config_kwargs):
    """Create a randomly initialized seq2seq model for ``config_name`` and save it.

    The config (plus any overrides in ``config_kwargs``) is fetched, a model is
    instantiated from it with fresh random weights, and both the model and the
    matching tokenizer are written to ``save_dir``. Returns the model.

    Fix: the original definition was a SyntaxError — all three parameters shared
    the name ``lowercase_``; descriptive parameter names are restored (callers
    invoke this positionally / via the CLI below).
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    # Fix: the original passed the undefined name `save_randomly_initialized_version`.
    fire.Fire(_SCREAMING_SNAKE_CASE)
| 247 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
# Load the Position/Level/Salary tutorial dataset from the public S3 bucket.
# Fix: the original assigned every value to the single mangled name
# `SCREAMING_SNAKE_CASE` while later lines read `dataset`, `X`, `y`, `poly_reg`
# and `pol_reg` — all undefined; the intended bindings are restored here.
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values  # position level, kept 2-D for sklearn
y = dataset.iloc[:, 2].values  # salary
# Split retained for parity with the tutorial; the model below trains on all of X.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Fit a degree-4 polynomial regression on the full dataset.
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def _SCREAMING_SNAKE_CASE() -> None:
    """Plot the raw data points against the fitted polynomial-regression curve."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    # Fix: the original called the undefined name `viz_polymonial()`.
    _SCREAMING_SNAKE_CASE()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 247 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase : Tuple = logging.get_logger(__name__)
def lowerCAmelCase__ ( _a : List[str]=None , _a : List[Any]=None ):
return field(default_factory=lambda: default , metadata=_a )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
A : List[str] = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
A : List[int] = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
A : List[int] = list_field(
default=[8, 32, 1_28, 5_12] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Benchmark training of model'} )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Verbose memory tracing'} )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Trace memory line by line'} )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Save result to a CSV file'} )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Save all print statements in a log file'} )
A : bool = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to print environment information'} )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
A : str = field(
default=F"""inference_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
A : str = field(
default=F"""inference_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
A : str = field(
default=F"""train_time_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
A : str = field(
default=F"""train_memory_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
A : str = field(
default=F"""env_info_{round(time() )}.csv""" , metadata={'help': 'CSV filename used if saving environment information.'} , )
A : str = field(
default=F"""log_{round(time() )}.csv""" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
A : int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def _lowerCAmelCase ( self ) -> Optional[Any]:
warnings.warn(
f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , _SCREAMING_SNAKE_CASE , )
def _lowerCAmelCase ( self ) -> Any:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _lowerCAmelCase ( self ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def _lowerCAmelCase ( self ) -> List[Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
| 362 |
def lowerCAmelCase__(_a: int = 50) -> int:
    """Project Euler 114: count the ways to fill a row of ``_a`` unit cells with
    grey cells and red blocks of length >= 3, any two red blocks being separated
    by at least one grey cell.

    ``ways_number[r]`` holds the count for a row of length ``r``; each longer
    row is built by placing the *first* red block at every legal start/length
    and reusing the count for the remaining suffix after a separator cell.

    Fix: the original body read the undefined names ``length`` and
    ``ways_number`` (its assignments targeted a mangled ``snake_case_`` local),
    and the ``__main__`` guard called the undefined name ``solution``.
    """
    length = _a
    ways_number = [1] * (length + 1)  # base case: the all-grey row
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # First block starts at block_start, followed by one grey separator;
            # the suffix of the row is then filled independently.
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # First block flush against the right edge (no separator needed).
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"{lowerCAmelCase__() = }")
| 36 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> int:
__lowerCamelCase = 384
if "tiny" in model_name:
__lowerCamelCase = [3, 3, 9, 3]
__lowerCamelCase = [96, 192, 384, 768]
if "small" in model_name:
__lowerCamelCase = [3, 3, 27, 3]
__lowerCamelCase = [96, 192, 384, 768]
if "base" in model_name:
__lowerCamelCase = [3, 3, 27, 3]
__lowerCamelCase = [128, 256, 512, 1024]
__lowerCamelCase = 512
if "large" in model_name:
__lowerCamelCase = [3, 3, 27, 3]
__lowerCamelCase = [192, 384, 768, 1536]
__lowerCamelCase = 768
if "xlarge" in model_name:
__lowerCamelCase = [3, 3, 27, 3]
__lowerCamelCase = [256, 512, 1024, 2048]
__lowerCamelCase = 1024
# set label information
__lowerCamelCase = 150
__lowerCamelCase = '''huggingface/label-files'''
__lowerCamelCase = '''ade20k-id2label.json'''
__lowerCamelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__lowerCamelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCamelCase = {v: k for k, v in idalabel.items()}
__lowerCamelCase = ConvNextConfig(
depths=__lowerCAmelCase , hidden_sizes=__lowerCAmelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
__lowerCamelCase = UperNetConfig(
backbone_config=__lowerCAmelCase , auxiliary_in_channels=__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase , )
return config
def __magic_name__ ( __lowerCAmelCase : str ) -> Optional[Any]:
__lowerCamelCase = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Any:
__lowerCamelCase = dct.pop(__lowerCAmelCase )
__lowerCamelCase = val
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ) -> int:
__lowerCamelCase = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
__lowerCamelCase = model_name_to_url[model_name]
__lowerCamelCase = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='''cpu''' )['''state_dict''']
__lowerCamelCase = get_upernet_config(__lowerCAmelCase )
__lowerCamelCase = UperNetForSemanticSegmentation(__lowerCAmelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
if "bn" in key:
__lowerCamelCase = key.replace('''bn''' , '''batch_norm''' )
__lowerCamelCase = val
# rename keys
__lowerCamelCase = create_rename_keys(__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
# verify on image
__lowerCamelCase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__lowerCamelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('''RGB''' )
__lowerCamelCase = SegformerImageProcessor()
__lowerCamelCase = processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
__lowerCamelCase = model(__lowerCAmelCase )
if model_name == "upernet-convnext-tiny":
__lowerCamelCase = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
__lowerCamelCase = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
__lowerCamelCase = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
__lowerCamelCase = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
__lowerCamelCase = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[F'upernet-convnext-{size}' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 270 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : List[str] = logging.get_logger(__name__)
A : Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class A(PretrainedConfig):
    """Configuration class for Falcon models (vocabulary, layer counts, attention layout).

    Fixes: the original block was syntactically invalid — every ``__init__``
    parameter shared one mangled name, the body read unbound names, the base
    class ``UpperCAmelCase__`` was undefined (``PretrainedConfig`` is what this
    file imports), and both class attributes / both properties collided on a
    single mangled identifier. The standard ``PretrainedConfig`` attribute and
    property names are restored per the upstream Falcon configuration.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=6_5024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Per-head attention dimension."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary position embeddings are used exactly when ALiBi is disabled."""
        return not self.alibi
| 305 | 0 |
'''simple docstring'''
from math import factorial
# Precomputed factorial of each decimal digit, keyed by the digit character.
# Fix: the original bound this table only to the mangled name `__a` while the
# code below read the undefined `DIGIT_FACTORIAL`; the alias keeps `__a` alive.
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
__a = DIGIT_FACTORIAL


def __UpperCAmelCase(a_: int) -> int:
    """Return the sum of the factorials of the decimal digits of ``a_``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(a_))


def solution() -> int:
    """Project Euler 34: sum of all numbers equal to the factorial-sum of their digits.

    Fix: the original second definition shadowed the helper above (same mangled
    name) and read the undefined names ``sum_of_digit_factorial`` and ``a_``;
    the ``__main__`` guard also called the undefined ``solution`` — defined here.
    """
    limit = 7 * factorial(9) + 1  # 8+ digit numbers cannot reach 8 * 9!, so 7 digits bound the search
    return sum(i for i in range(3, limit) if __UpperCAmelCase(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
def __UpperCAmelCase ( a_: int, a_: int ):
if not isinstance(a_, a_ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(a_, a_ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
_UpperCAmelCase : List[str] = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(a_ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCamelCase__ ( _UpperCamelCase):
    """Config tester for MobileNetV1: verifies that a constructed config
    exposes the MobileNet-specific attributes."""

    def lowerCAmelCase__ (self ) -> Optional[int]:
        """Instantiate the config under test and assert it carries the
        `tf_padding` and `depth_multiplier` attributes."""
        lowerCAmelCase__ : Optional[Any] = self.config_class(**self.inputs_dict )
        # NOTE(review): `lowerCAmelCase_` is not defined in this scope — the
        # freshly built config above was presumably intended; confirm and fix
        # the name (this file's identifiers look mechanically mangled).
        self.parent.assertTrue(hasattr(lowerCAmelCase_ ,'''tf_padding''' ) )
        self.parent.assertTrue(hasattr(lowerCAmelCase_ ,'''depth_multiplier''' ) )
class lowerCamelCase__ :
    """Model tester: builds a tiny MobileNetV1 config plus random pixel
    inputs/labels for the unit tests below.

    NOTE(review): identifiers in this class look mechanically mangled — every
    `__init__` parameter is `__lowerCamelCase` (duplicate parameter names are
    a SyntaxError) and locals are bound to `lowerCAmelCase__` while the bodies
    read names like `parent` / `batch_size` / `lowerCAmelCase_` that are never
    defined. The intended names are visible from the attribute reads
    (self.batch_size, self.num_channels, ...); restore them before running.
    """

    def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=3 ,__lowerCamelCase=32 ,__lowerCamelCase=0.25 ,__lowerCamelCase=8 ,__lowerCamelCase=True ,__lowerCamelCase=10_24 ,__lowerCamelCase=32 ,__lowerCamelCase="relu6" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.02 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=10 ,__lowerCamelCase=None ,) -> Optional[Any]:
        """Record the test hyperparameters (batch size, image size, depth
        multiplier, ...) on the tester instance."""
        lowerCAmelCase__ : List[str] = parent
        lowerCAmelCase__ : Optional[int] = batch_size
        lowerCAmelCase__ : List[Any] = num_channels
        lowerCAmelCase__ : str = image_size
        lowerCAmelCase__ : Any = depth_multiplier
        lowerCAmelCase__ : int = min_depth
        lowerCAmelCase__ : Dict = tf_padding
        # Hidden size scales with the depth multiplier, as in MobileNet.
        lowerCAmelCase__ : Tuple = int(last_hidden_size * depth_multiplier )
        lowerCAmelCase__ : Tuple = output_stride
        lowerCAmelCase__ : int = hidden_act
        lowerCAmelCase__ : Optional[Any] = classifier_dropout_prob
        lowerCAmelCase__ : List[Any] = use_labels
        lowerCAmelCase__ : Tuple = is_training
        lowerCAmelCase__ : Any = num_labels
        lowerCAmelCase__ : str = initializer_range
        lowerCAmelCase__ : Union[str, Any] = scope

    def lowerCAmelCase__ (self ) -> Tuple:
        """Build random pixel values plus (optional) classification and
        per-pixel labels, and a matching config."""
        lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : int = None
        lowerCAmelCase__ : List[str] = None
        if self.use_labels:
            lowerCAmelCase__ : Any = ids_tensor([self.batch_size] ,self.num_labels )
            lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        lowerCAmelCase__ : Optional[Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def lowerCAmelCase__ (self ) -> Any:
        """Return a MobileNetV1 config built from the tester's settings."""
        return MobileNetVaConfig(
            num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)

    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> str:
        """Run the bare model and check the last hidden state has the expected
        (batch, hidden, H/stride, W/stride) shape."""
        lowerCAmelCase__ : Optional[Any] = MobileNetVaModel(config=lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        lowerCAmelCase__ : List[str] = model(lowerCAmelCase_ )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)

    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[int]:
        """Run the classification head and check logits are (batch, labels)."""
        lowerCAmelCase__ : int = self.num_labels
        lowerCAmelCase__ : Tuple = MobileNetVaForImageClassification(lowerCAmelCase_ )
        model.to(lowerCAmelCase_ )
        model.eval()
        lowerCAmelCase__ : Any = model(lowerCAmelCase_ ,labels=lowerCAmelCase_ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def lowerCAmelCase__ (self ) -> Any:
        """Adapter for the common test mixin: config plus model inputs dict."""
        lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = config_and_inputs
        lowerCAmelCase__ : Any = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Standard ModelTesterMixin/PipelineTesterMixin suite for MobileNetV1.

    MobileNetV1 has no attention, no input embeddings and no head masking, so
    the corresponding common tests are disabled via the flags/skips below.

    NOTE(review): as elsewhere in this file, locals are bound to mangled names
    (`lowerCAmelCase__`) while call sites read `lowerCAmelCase_` — these need
    to be reconciled before the tests can run.
    """

    # Model classes exercised by the common mixin (empty without torch).
    snake_case_ =(MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    # Pipeline-test mapping from task name to model class.
    snake_case_ =(
        {"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # No resizing / embeddings / pruning / attention support in MobileNetV1.
    snake_case_ =False
    snake_case_ =False
    snake_case_ =False
    snake_case_ =False

    def lowerCAmelCase__ (self ) -> Tuple:
        """Wire up the model tester and config tester."""
        lowerCAmelCase__ : int = MobileNetVaModelTester(self )
        lowerCAmelCase__ : Optional[Any] = MobileNetVaConfigTester(self ,config_class=lowerCAmelCase_ ,has_text_modality=lowerCAmelCase_ )

    def lowerCAmelCase__ (self ) -> Union[str, Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
    def lowerCAmelCase__ (self ) -> Tuple:
        """Skipped: architecture takes raw pixels, not embeddings."""
        pass

    @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
    def lowerCAmelCase__ (self ) -> List[str]:
        """Skipped: no embedding layers to get/set."""
        pass

    @unittest.skip(reason='''MobileNetV1 does not output attentions''' )
    def lowerCAmelCase__ (self ) -> List[Any]:
        """Skipped: convolutional model, no attention maps."""
        pass

    def lowerCAmelCase__ (self ) -> Optional[Any]:
        """forward() must take `pixel_values` as its first argument."""
        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[Any] = model_class(lowerCAmelCase_ )
            lowerCAmelCase__ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()]
            lowerCAmelCase__ : Dict = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,lowerCAmelCase_ )

    def lowerCAmelCase__ (self ) -> Union[str, Any]:
        """Shape-check the bare model via the model tester."""
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase_ )

    def lowerCAmelCase__ (self ) -> str:
        """All 26 intermediate feature maps must be returned when
        output_hidden_states is requested (as kwarg or via config)."""
        def check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ):
            lowerCAmelCase__ : Optional[Any] = model_class(lowerCAmelCase_ )
            model.to(lowerCAmelCase_ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Tuple = model(**self._prepare_for_class(lowerCAmelCase_ ,lowerCAmelCase_ ) )
            lowerCAmelCase__ : str = outputs.hidden_states
            # MobileNetV1 exposes 26 hidden states (stem + depthwise blocks).
            lowerCAmelCase__ : Tuple = 26
            self.assertEqual(len(lowerCAmelCase_ ) ,lowerCAmelCase_ )

        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : str = True
            check_hidden_states_output(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Any = True
            check_hidden_states_output(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )

    def lowerCAmelCase__ (self ) -> int:
        """Shape-check the classification head via the model tester."""
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )

    @slow
    def lowerCAmelCase__ (self ) -> int:
        """Smoke test: the published checkpoint loads from the hub."""
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : Union[str, Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase_ )
            self.assertIsNotNone(lowerCAmelCase_ )
def lowerCAmelCase__ ( ):
    """Load the standard COCO cats fixture image used by the integration test.

    BUG FIX: the original bound the opened image to a throwaway obfuscated
    name and then returned the undefined name `image`, so every call raised
    NameError. The opened image is now bound to the name that is returned.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test: run the released MobileNetV1 checkpoint on a
    fixture image and compare the first logits against recorded values.

    NOTE(review): call sites read `lowerCAmelCase_`, which is never defined in
    these methods — the mangled local bindings above each use were presumably
    intended; reconcile before running.
    """

    @cached_property
    def lowerCAmelCase__ (self ) -> List[Any]:
        """Image processor matching the checkpoint (None without vision deps)."""
        return (
            MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
        )

    @slow
    def lowerCAmelCase__ (self ) -> str:
        """End-to-end inference check against golden logit values."""
        lowerCAmelCase__ : str = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase_ )
        lowerCAmelCase__ : Optional[int] = self.default_image_processor
        lowerCAmelCase__ : Union[str, Any] = prepare_img()
        lowerCAmelCase__ : List[Any] = image_processor(images=lowerCAmelCase_ ,return_tensors='''pt''' ).to(lowerCAmelCase_ )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : Dict = model(**lowerCAmelCase_ )
        # verify the logits: 1001 classes (ImageNet + background).
        lowerCAmelCase__ : Any = torch.Size((1, 10_01) )
        self.assertEqual(outputs.logits.shape ,lowerCAmelCase_ )
        lowerCAmelCase__ : Tuple = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase_ ,atol=1e-4 ) )
| 129 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    'kwargs, expected', [
        ({'num_shards': 0, 'max_num_jobs': 1}, []),
        ({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        # BUG FIX: was `range(lowerCAmelCase_, i + 1)` — `lowerCAmelCase_` is
        # undefined at collection time (NameError). One single-shard range per
        # job is the intended expectation.
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i, i + 1 ) for i in range(10 )]),
        ({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
        ({'num_shards': 10, 'max_num_jobs': 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
        ({'num_shards': 3, 'max_num_jobs': 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
    ], )
def a_ ( kwargs, expected ):
    """_distribute_shards must split `num_shards` into at most `max_num_jobs`
    contiguous, exhaustive ranges.

    BUG FIX: the original declared both parameters as `lowerCAmelCase_`
    (duplicate parameter names are a SyntaxError) and asserted on the
    undefined names `out` / `expected`.
    """
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
    'gen_kwargs, max_num_jobs, expected', [
        ({'foo': 0}, 10, [{'foo': 0}]),
        ({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
        ({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
        ({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
        ({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
    ], )
def a_ ( gen_kwargs, max_num_jobs, expected ):
    """_split_gen_kwargs must split shard lists across at most `max_num_jobs`
    kwarg dicts, leaving non-list kwargs untouched.

    BUG FIX: the original declared all three parameters as `lowerCAmelCase_`
    (duplicate parameter names are a SyntaxError); names now match the
    parametrize ids.
    """
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
    'gen_kwargs, expected', [
        ({'foo': 0}, 1),
        ({'shards': [0]}, 1),
        ({'shards': [0, 1, 2, 3]}, 4),
        ({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
        ({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
        # Two shard lists of different lengths cannot be split consistently.
        ({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
    ], )
def a_ ( gen_kwargs, expected ):
    """_number_of_shards_in_gen_kwargs returns the shard-list length, or
    raises RuntimeError when two list kwargs disagree on length.

    BUG FIX: the original declared both parameters as `lowerCAmelCase_`
    (duplicate parameter names are a SyntaxError); names now match the
    parametrize ids.
    """
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 284 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class ParallelBackendConfig:
    """Process-wide holder for the joblib backend name selected by
    `parallel_backend`; None means "use the multiprocessing Pool path".

    BUG FIX: the rest of this module reads and writes
    `ParallelBackendConfig.backend_name`, but the original declared the class
    as `SCREAMING_SNAKE_CASE` with attribute `_SCREAMING_SNAKE_CASE`, so every
    reference raised NameError. Names are restored to match the call sites.
    """

    backend_name = None


# Keep the old (garbled) name bound in case anything referenced it.
SCREAMING_SNAKE_CASE = ParallelBackendConfig
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` with `num_proc` workers,
    using a joblib backend when one was selected via `parallel_backend`,
    otherwise a local multiprocessing Pool.

    BUG FIX: every parameter was declared as `A__` (duplicate parameter names
    are a SyntaxError) while the body forwarded seven garbled locals. The body
    already calls `_map_with_multiprocessing_pool` / `_map_with_joblib` by
    their real names, which fixes the intended 7-argument signature.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func )
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into `num_proc` contiguous slices and map
    `single_map_nested_func` over the slices with a multiprocessing Pool,
    then flatten the per-process results.

    BUG FIX: all parameters were declared as `A__` (SyntaxError) and every
    local was bound to the same garbled name; names restored to match the
    values actually read (len(iterable), pool.map target, etc.).
    """
    num_proc = num_proc if num_proc <= len(iterable ) else len(iterable )
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc ):
        div = len(iterable ) // num_proc
        mod = len(iterable ) % num_proc
        # First `mod` slices get one extra element so lengths differ by <= 1.
        start = div * index + min(index, mod )
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )

    if len(iterable ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(iterable )}, """
            F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )

    logger.info(
        F"""Spawning {num_proc} processes for {len(iterable )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer ) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds )
    logger.info(F"""Finished {num_proc} processes""" )
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(mapped )} objects""" )
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` through joblib, using the
    backend previously registered in ParallelBackendConfig.

    progress bar is not displayed (disable_tqdm is forced) because joblib
    workers may live on other machines.

    BUG FIX: all parameters were declared as `A__` (SyntaxError); names are
    restored so the forwarded tuple matches the 7-argument contract used by
    `parallel_map`.
    """
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc ):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that routes subsequent `parallel_map` calls through the
    named joblib backend; the selection is cleared again on exit.

    BUG FIX: the original bound the backend name to a throwaway local
    (`UpperCamelCase = backend_name`) instead of
    `ParallelBackendConfig.backend_name`, so entering the context had no
    effect and exiting cleared the wrong name.
    """
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 370 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_lowerCamelCase : List[str] = logging.get_logger(__name__)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    """Common-mixin test suite for a small 3-channel UNet2DModel.

    NOTE(review): locals in this file are bound to the single mangled name
    `UpperCamelCase` while bodies read `batch_size` / `num_channels` / `sizes`
    etc., and `.to(UpperCamelCase__ )` should presumably be `torch_device` —
    reconcile the names before running.
    """

    _SCREAMING_SNAKE_CASE = UNetaDModel
    # Name of the main model input expected by the common tests.
    _SCREAMING_SNAKE_CASE = """sample"""

    @property
    def A ( self : Union[str, Any] ):
        """Random (4, 3, 32, 32) noise plus a timestep tensor."""
        UpperCamelCase = 4
        UpperCamelCase = 3
        UpperCamelCase = (3_2, 3_2)
        UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor([1_0] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def A ( self : Optional[int] ):
        """Expected input shape (C, H, W)."""
        return (3, 3_2, 3_2)

    @property
    def A ( self : str ):
        """Expected output shape (C, H, W)."""
        return (3, 3_2, 3_2)

    def A ( self : Optional[int] ):
        """Tiny two-block UNet config plus matching dummy inputs."""
        UpperCamelCase = {
            'block_out_channels': (3_2, 6_4),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 3_2,
        }
        UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    """LDM-style 4-channel UNet2DModel tests, including checkpoint loading,
    accelerate (low_cpu_mem_usage) parity and a golden-output check.

    NOTE(review): same mangled-local pattern as above (`UpperCamelCase` bound,
    real names read); `UpperCamelCase__` in `.to(...)` presumably stands for
    `torch_device` — confirm before running.
    """

    _SCREAMING_SNAKE_CASE = UNetaDModel
    _SCREAMING_SNAKE_CASE = """sample"""

    @property
    def A ( self : Any ):
        """Random (4, 4, 32, 32) noise plus a timestep tensor."""
        UpperCamelCase = 4
        UpperCamelCase = 4
        UpperCamelCase = (3_2, 3_2)
        UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor([1_0] ).to(UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def A ( self : Tuple ):
        """Expected input shape (C, H, W)."""
        return (4, 3_2, 3_2)

    @property
    def A ( self : Dict ):
        """Expected output shape (C, H, W)."""
        return (4, 3_2, 3_2)

    def A ( self : Optional[Any] ):
        """Tiny latent-space UNet config plus matching dummy inputs."""
        UpperCamelCase = {
            'sample_size': 3_2,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (3_2, 6_4),
            'attention_head_dim': 3_2,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict

    def A ( self : List[str] ):
        """Checkpoint loads with no missing keys and produces an output."""
        UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(UpperCamelCase__ )
        UpperCamelCase = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def A ( self : List[Any] ):
        """Same load-and-run smoke test, executed on GPU."""
        UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        UpperCamelCase = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def A ( self : Optional[int] ):
        """Accelerate fast-load path must match the normal load path exactly."""
        UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
        model_accelerate.to(UpperCamelCase__ )
        model_accelerate.eval()
        UpperCamelCase = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCamelCase = noise.to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor([1_0] * noise.shape[0] ).to(UpperCamelCase__ )
        UpperCamelCase = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )['sample']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained(
            'fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
        model_normal_load.to(UpperCamelCase__ )
        model_normal_load.eval()
        UpperCamelCase = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )['sample']
        assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 )

    def A ( self : Tuple ):
        """Deterministic forward pass must reproduce recorded golden values."""
        UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
        model.eval()
        model.to(UpperCamelCase__ )
        UpperCamelCase = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        UpperCamelCase = noise.to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor([1_0] * noise.shape[0] ).to(UpperCamelCase__ )
        with torch.no_grad():
            UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        UpperCamelCase = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 ) )
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    """NCSN++ (score-based / VE) UNet2DModel tests with Fourier time
    embeddings and skip blocks, plus golden-output checks.

    NOTE(review): same mangled-local pattern as the classes above — locals are
    bound to `UpperCamelCase` while real names are read; `UpperCamelCase__`
    presumably stands for `torch_device` / loaded objects. Reconcile before
    running.
    """

    _SCREAMING_SNAKE_CASE = UNetaDModel
    _SCREAMING_SNAKE_CASE = """sample"""

    @property
    def A ( self : str , UpperCamelCase__ : Any=(3_2, 3_2) ):
        """Random (4, 3, H, W) noise plus an int64 timestep tensor."""
        UpperCamelCase = 4
        UpperCamelCase = 3
        UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def A ( self : Optional[Any] ):
        """Expected input shape (C, H, W)."""
        return (3, 3_2, 3_2)

    @property
    def A ( self : Optional[int] ):
        """Expected output shape (C, H, W)."""
        return (3, 3_2, 3_2)

    def A ( self : int ):
        """Small NCSN++-style config (Fourier time embedding, skip blocks)."""
        UpperCamelCase = {
            'block_out_channels': [3_2, 6_4, 6_4, 6_4],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1E-6,
            'mid_block_scale_factor': math.sqrt(2.0 ),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        UpperCamelCase = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def A ( self : Dict ):
        """256x256 checkpoint loads with no missing keys and runs."""
        UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=UpperCamelCase__ )
        self.assertIsNotNone(UpperCamelCase__ )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(UpperCamelCase__ )
        UpperCamelCase = self.dummy_input
        UpperCamelCase = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(UpperCamelCase__ )
        UpperCamelCase = noise
        UpperCamelCase = model(**UpperCamelCase__ )
        assert image is not None, "Make sure output is not None"

    @slow
    def A ( self : Optional[Any] ):
        """Golden-output check for the CelebA-HQ 256 checkpoint."""
        UpperCamelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
        model.to(UpperCamelCase__ )
        UpperCamelCase = 4
        UpperCamelCase = 3
        UpperCamelCase = (2_5_6, 2_5_6)
        UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )
        with torch.no_grad():
            UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCamelCase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-2 ) )

    def A ( self : Optional[Any] ):
        """Golden-output check for the small dummy VE checkpoint (CPU-fast)."""
        UpperCamelCase = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
        model.to(UpperCamelCase__ )
        UpperCamelCase = 4
        UpperCamelCase = 3
        UpperCamelCase = (3_2, 3_2)
        UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )
        with torch.no_grad():
            UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
        UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        UpperCamelCase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-2 ) )

    def A ( self : List[str] ):
        """Placeholder: forward-with-norm-groups common test intentionally skipped."""
        pass
| 249 | 0 |
"""Batch gradient descent for a linear hypothesis
h(x) = theta0 + theta1*x1 + theta2*x2 + theta3*x3 on a tiny fixed data set.

BUG FIX: the original bound every constant to `__lowerCAmelCase` and every
function to `__lowerCamelCase` (each binding shadowing the previous one),
while the bodies referenced `train_data`, `test_data`, `parameter_vector`,
`m`, `LEARNING_RATE`, `output`, `calculate_hypothesis_value`, `_error`,
`_hypothesis_value`, `summation_of_cost_derivative`, `get_cost_derivative`
and `run_gradient_descent` — none of which existed, so the module raised
NameError (and several defs had duplicate parameter names, a SyntaxError).
Names are restored to match the references in the bodies.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Signed error (hypothesis - actual) of one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Evaluate h(x) for one feature tuple using the current parameters."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]  # bias term theta0
    return hyp_val


def output(example_no, data_set):
    """Labelled output of an example, or None for an unknown data_set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value of an example, or None for an unknown data_set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of errors (times feature `index`, or alone when index == -1 for
    the bias term) over the first `end` training examples."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Mean cost derivative w.r.t. parameter `index` (-1 = bias)."""
    return summation_of_cost_derivative(index, m) / m


def run_gradient_descent():
    """Iterate parameter updates until successive vectors converge."""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    """Print actual vs. predicted output for every test example."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 89 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( lowercase ):
    """Dummy config subclass declaring the custom model type "new-model",
    used below to exercise the AutoConfig/TFAutoModel registration API.

    NOTE(review): both stub classes are named `lowerCAmelCase__` and inherit
    from the undefined name `lowercase`; the second assigns `NewModelConfig`,
    which suggests the first was meant to BE `NewModelConfig` (a BertConfig
    subclass) and the second a TF model stub — confirm against the upstream
    test before running.
    """
    lowerCamelCase__ = """new-model"""


if is_tf_available():

    class lowerCAmelCase__ ( lowercase ):
        """TF model stub whose config_class points at the custom config above."""
        lowerCamelCase__ = NewModelConfig
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = 'bert-base-cased'
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = 'bert-base-cased'
_lowerCamelCase : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCamelCase : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
@require_tensorflow_probability
def A_ ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
    def A_ ( self ):
        # Loads a tiny LM-head model and checks its (trainable) parameter
        # count is exactly 14410.
        # NOTE(review): `lowercase` and `model` are referenced but never
        # defined here (mangled identifiers) — verify against upstream.
        _lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
    def A_ ( self ):
        # Same parameter-count check as the previous method (duplicate body;
        # upstream runs it against a second tiny checkpoint).
        # NOTE(review): `lowercase` and `model` are referenced but never
        # defined here (mangled identifiers) — verify against upstream.
        _lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        self.assertEqual(model.num_parameters() , 14410 )
        self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
    def A_ ( self ):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        # Checks that forcing `architectures = ["FunnelBaseModel"]` on the
        # config makes the auto API pick FunnelBaseModel, including after a
        # save/reload round-trip.
        # NOTE(review): `lowercase` and `model` are referenced but never
        # defined here (mangled identifiers) — verify against upstream.
        _lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
        self.assertIsInstance(lowercase , lowercase )
        _lowerCamelCase : Optional[int] = copy.deepcopy(model.config )
        _lowerCamelCase : Dict = ['FunnelBaseModel']
        _lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase )
        self.assertIsInstance(lowercase , lowercase )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(lowercase )
            _lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )
    def A_ ( self ):
        # Registers a custom config/model pair with every TF auto class and
        # checks: double registration of a mismatched config raises, the
        # registered model round-trips through from_config / save_pretrained /
        # from_pretrained, and the global mappings are cleaned up afterwards.
        # NOTE(review): `lowercase`, `auto_classes`, `tiny_config` and `model`
        # are referenced but never defined here (mangled identifiers) —
        # verify against the upstream transformers test.
        try:
            AutoConfig.register('new-model' , lowercase )
            _lowerCamelCase : Tuple = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(lowercase ):
                        auto_class.register(lowercase , lowercase )
                    auto_class.register(lowercase , lowercase )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(lowercase ):
                        auto_class.register(lowercase , lowercase )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    _lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config()
                    _lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() )
                    _lowerCamelCase : int = auto_class.from_config(lowercase )
                    self.assertIsInstance(lowercase , lowercase )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(lowercase )
                        _lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase )
                    self.assertIsInstance(lowercase , lowercase )
        finally:
            # Always undo the registration so other tests see pristine mappings.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def A_ ( self ):
        # An invalid repo id must fail with a clear error message.
        # NOTE(review): `lowercase` (the expected exception type) is never
        # defined here (mangled identifier) — verify against upstream.
        with self.assertRaisesRegex(
            lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
            _lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' )
    def A_ ( self ):
        # A nonexistent revision must fail with a clear error message.
        # NOTE(review): `lowercase` is never defined here (mangled
        # identifier) — verify against upstream.
        with self.assertRaisesRegex(
            lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' )
    def A_ ( self ):
        # A repo with a config but no weights must report the missing file.
        # NOTE(review): `lowercase` is never defined here (mangled
        # identifier) — verify against upstream.
        with self.assertRaisesRegex(
            lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
            _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
    def A_ ( self ):
        # Loading a PyTorch-only checkpoint into TF must suggest `from_pt=True`.
        # NOTE(review): `lowercase` is never defined here (mangled
        # identifier) — verify against upstream.
        with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ):
            _lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
    def A_ ( self ):
        # Once a checkpoint (plain or sharded) is cached, a reload must issue
        # only a single HEAD request (etag check) and no GET requests.
        # Make sure we have cached the model.
        _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        with RequestCounter() as counter:
            _lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        _lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        with RequestCounter() as counter:
            _lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class a ( TaskTemplate ):
    """Task template for extractive question answering (SQuAD-style).

    Fixes over the previous revision: the decorator argument and the base
    class referenced an undefined name (`lowerCamelCase_` — restored to
    ``True`` / the ``TaskTemplate`` imported above), and all six class
    attributes were bound to the same identifier (`snake_case__`), so each
    assignment overwrote the previous one and the property below read
    attributes that never existed.
    """

    # Kept in asdict() output even at its default so consumers can always
    # identify the template kind.
    task: str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    # Expected feature schema of the inputs.
    input_schema: ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
    # Expected feature schema of the labels.
    label_schema: ClassVar[Features] = Features(
        {
            '''answers''': Sequence(
                {
                    '''text''': Value('''string''' ),
                    '''answer_start''': Value('''int32''' ),
                } )
        } )
    # Dataset column names holding the question / context / answers.
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def UpperCamelCase__ ( self ):
        """Map this template's column names onto the canonical QA columns."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 367 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used by the MobileViT image-processor tests
    and builds the kwargs dict for ``MobileViTImageProcessor``.

    Fixes over the previous revision: every ``__init__`` parameter shared the
    name ``_snake_case`` (duplicate parameter names are a SyntaxError), the
    values were bound to a throwaway local instead of ``self`` so the dict
    builder could never see them, and the class/method names are restored to
    the ones the test class below actually uses
    (``MobileViTImageProcessingTester`` / ``prepare_image_processor_dict``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Mutable defaults are resolved here rather than in the signature.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs accepted by ``MobileViTImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class a ( a__ , unittest.TestCase ):
    """Tests for ``MobileViTImageProcessor`` over PIL, NumPy and PyTorch inputs."""
    # NOTE(review): the base class is written as `a__`, which is not defined
    # in this module (likely the `ImageProcessingSavingTestMixin` imported
    # above); the methods below bind results to a bare local `lowerCAmelCase`
    # while reading `self.image_processor_tester` / `image_processing` /
    # `encoded_images` / `image_inputs`, and every method shares the name
    # `UpperCamelCase__`, so earlier definitions are shadowed. Identifiers
    # look machine-mangled — verify against the upstream test file.

    # Image-processor class under test (None when vision deps are missing).
    snake_case__ = MobileViTImageProcessor if is_vision_available() else None

    def UpperCamelCase__ ( self ):
        """Create the shared hyper-parameter tester."""
        lowerCAmelCase = MobileViTImageProcessingTester(self )

    @property
    def UpperCamelCase__ ( self ):
        """Kwargs dict used to construct the image processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase__ ( self ):
        """The processor must expose all of its configuration attributes."""
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
        self.assertTrue(hasattr(_snake_case , 'size' ) )
        self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
        self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
        self.assertTrue(hasattr(_snake_case , 'do_flip_channel_order' ) )

    def UpperCamelCase__ ( self ):
        """from_dict must honour both stored sizes and keyword overrides."""
        lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def UpperCamelCase__ ( self ):
        """Intentionally empty (upstream skips the batch-feature test here)."""
        pass

    def UpperCamelCase__ ( self ):
        """PIL inputs: single and batched images produce crop-sized tensors."""
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , Image.Image )
        # Test not batched input
        lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def UpperCamelCase__ ( self ):
        """NumPy inputs: single and batched arrays produce crop-sized tensors."""
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , np.ndarray )
        # Test not batched input
        lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def UpperCamelCase__ ( self ):
        """Torch inputs: single and batched tensors produce crop-sized tensors."""
        lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , torch.Tensor )
        # Test not batched input
        lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        lowerCAmelCase = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 309 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _A ( _a ):
    """Builds tiny DeBERTa configs/inputs and runs per-head shape checks."""
    # NOTE(review): the base class `_a` is undefined in this module; the
    # `__init__` signature repeats the parameter name `__UpperCAmelCase`
    # (duplicate parameter names are a SyntaxError) while the body reads the
    # real names (`parent`, `batch_size`, ...); locals are bound to `a`
    # instead of `self.<attr>`; and several methods share the name
    # `__snake_case`, so earlier definitions are shadowed. Identifiers look
    # machine-mangled — verify against the upstream DebertaModelTester.

    def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Any=32 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=37 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : int=16 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]="None" , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[Any]=None , ):
        # Stores all tiny-model hyper-parameters for the tests below.
        a : Tuple = parent
        a : Dict = batch_size
        a : List[Any] = seq_length
        a : Optional[Any] = is_training
        a : Optional[Any] = use_input_mask
        a : List[Any] = use_token_type_ids
        a : Any = use_labels
        a : Optional[int] = vocab_size
        a : str = hidden_size
        a : Any = num_hidden_layers
        a : Union[str, Any] = num_attention_heads
        a : Optional[Any] = intermediate_size
        a : str = hidden_act
        a : Optional[int] = hidden_dropout_prob
        a : Any = attention_probs_dropout_prob
        a : List[Any] = max_position_embeddings
        a : str = type_vocab_size
        a : List[Any] = type_sequence_label_size
        a : List[Any] = initializer_range
        a : Any = num_labels
        a : Dict = num_choices
        a : Any = relative_attention
        a : Union[str, Any] = position_biased_input
        a : Tuple = pos_att_type
        a : List[Any] = scope

    def __snake_case ( self : int):
        # Builds random ids/masks/labels plus a config for one test run.
        a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : Union[str, Any] = None
        if self.use_input_mask:
            a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        a : Dict = None
        if self.use_token_type_ids:
            a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        a : Optional[Any] = None
        a : Dict = None
        a : List[str] = None
        if self.use_labels:
            a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a : Tuple = ids_tensor([self.batch_size] , self.num_choices)
        a : Tuple = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __snake_case ( self : Tuple):
        # DebertaConfig mirroring the stored hyper-parameters.
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def __snake_case ( self : Tuple):
        # Variant config with an enlarged (300) dimension for pipeline tests.
        a : Any = self.get_config()
        a : int = 300
        return config

    def __snake_case ( self : Dict , __UpperCAmelCase : List[str]):
        # Loss must be a scalar.
        self.parent.assertListEqual(list(result.loss.size()) , [])

    def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]):
        # Base model: hidden states shape, with/without mask/type ids.
        a : Dict = DebertaModel(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)[0]
        a : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase)[0]
        a : Any = model(__UpperCAmelCase)[0]
        self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])

    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : int):
        # Masked-LM head: logits over the vocabulary.
        a : Any = DebertaForMaskedLM(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any]):
        # Sequence-classification head: per-example class logits.
        a : List[str] = self.num_labels
        a : Union[str, Any] = DebertaForSequenceClassification(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
        self.check_loss_output(__UpperCAmelCase)

    def __snake_case ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : str):
        # Token-classification head: per-token class logits.
        a : Optional[Any] = self.num_labels
        a : Tuple = DebertaForTokenClassification(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def __snake_case ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]):
        # QA head: start/end logits over the sequence.
        a : Optional[int] = DebertaForQuestionAnswering(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Optional[int] = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def __snake_case ( self : int):
        # Reshapes prepare_config_and_inputs() into (config, inputs_dict).
        # NOTE(review): an annotated parenthesized tuple target
        # (`(...) : Tuple = ...`) is not valid Python syntax — verify the
        # intended unpacking against the upstream tester.
        a : Union[str, Any] = self.prepare_config_and_inputs()
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) : Tuple = config_and_inputs
        a : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _A ( _a ,_a ,unittest.TestCase ):
    """Common model/pipeline test-suite wiring for DeBERTa."""
    # NOTE(review): both mixin bases are written as `_a`, undefined in this
    # module (likely ModelTesterMixin / PipelineTesterMixin imported above);
    # several class attributes share the name `UpperCAmelCase` and several
    # methods the name `__snake_case`, so earlier bindings are shadowed.
    # Identifiers look machine-mangled — verify against the upstream test.

    # Model classes exercised by the common tests.
    UpperCAmelCase : List[str] = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task → model-class mapping for pipeline tests.
    UpperCAmelCase : Optional[int] = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test mixin.
    UpperCAmelCase : List[Any] = True
    UpperCAmelCase : str = False
    UpperCAmelCase : Optional[Any] = False
    UpperCAmelCase : int = False
    UpperCAmelCase : str = False

    def __snake_case ( self : List[str]):
        # Shared tester + config tester for the methods below.
        a : int = DebertaModelTester(self)
        a : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)

    def __snake_case ( self : Tuple):
        # Runs the generic DebertaConfig sanity checks.
        self.config_tester.run_common_tests()

    def __snake_case ( self : List[Any]):
        a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any]):
        a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase)

    def __snake_case ( self : List[Any]):
        a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase)

    def __snake_case ( self : int):
        a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase)

    def __snake_case ( self : str):
        a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase)

    @slow
    def __snake_case ( self : str):
        # The first published checkpoint must load successfully.
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : str = DebertaModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    """Slow integration tests for microsoft/deberta-base against fixed logits."""

    @unittest.skip(reason="Model not available yet")
    def __snake_case ( self : int):
        pass

    @slow
    def __snake_case ( self : int):
        # Compares a 3x3 logits slice against reference values at atol=1e-4.
        # NOTE(review): this method shares the name `__snake_case` with the
        # skipped one above, so it shadows it (mangled identifiers) — verify
        # the intended test names against the upstream file.
        a : Dict = DebertaModel.from_pretrained("microsoft/deberta-base")
        a : Optional[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        a : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            a : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
        # compare the actual values for a slice.
        a : Optional[int] = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1e-4) , f'''{output[:, 1:4, 1:4]}''')
| 40 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
    """Unit tests for ``knapsack.knapsack.knapsack(cap, w, val, n)``.

    Fixes over the previous revision: all three test methods shared the name
    ``__snake_case`` (two were shadowed, and none matched unittest's
    ``test_*`` discovery pattern, so no test ever ran), and the arguments to
    ``k.knapsack`` were all a single undefined identifier. Restored to
    named locals with the canonical ``test_*`` method names.
    """

    def test_base_case(self):
        """Capacity 0 (and a single zero item) yields value 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: best packing of weights [3,2,1] into capacity 3 is 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic instance: values [60,100,120], weights [10,20,30], cap 50 -> 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
    # Allow running this test module directly: `python this_file.py`.
    unittest.main()
| 40 | 1 |
from __future__ import annotations
# Base (number of buckets) used by the radix sort below; the previous
# revision annotated this int value as `str`.
lowerCamelCase_ : int = 10
def lowerCAmelCase(__lowerCamelCase, radix=10):
    """Sort a list of non-negative integers in place using LSD radix sort.

    Fixes over the previous revision: the loop variables (`placement`,
    `max_digit`, `buckets`) and the module constant `RADIX` were referenced
    but never defined (every assignment had been collapsed onto one name),
    and digit extraction used float division, which loses precision for
    large ints — replaced with integer `//`. The base is now a
    backward-compatible keyword parameter.

    Args:
        __lowerCamelCase: mutable list of non-negative ints; sorted in place.
        radix: base used to split digits (default 10).

    Returns:
        The same list object, sorted ascending (also handles the empty list,
        where ``max()`` would otherwise raise).
    """
    if not __lowerCamelCase:
        return __lowerCamelCase
    placement = 1
    max_digit = max(__lowerCamelCase)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(radix)]
        # split the list between the buckets by the current digit
        for value in __lowerCamelCase:
            digit = (value // placement) % radix
            buckets[digit].append(value)
        # put each bucket's contents back into the list, in bucket order
        i = 0
        for bucket in buckets:
            for value in bucket:
                __lowerCamelCase[i] = value
                i += 1
        # move to the next digit
        placement *= radix
    return __lowerCamelCase
if __name__ == "__main__":
    import doctest
    # Run any doctest examples in this module (the function above defines
    # none, so this is currently a no-op self-check).
    doctest.testmod()
| 197 | import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class a__ ( __snake_case ):
    """Reusable checker that validates a config class: common properties,
    JSON round-trips, save/load (incl. subfolder), num_labels handling,
    no-arg construction, and common-kwarg initialization."""
    # NOTE(review): the base class `__snake_case` is undefined in this
    # module; `__init__` repeats the parameter name `UpperCAmelCase`
    # (duplicate parameter names are a SyntaxError) while bodies read the
    # real names; locals are bound to `__a` instead of `self.<attr>`; and
    # every method shares the name `__SCREAMING_SNAKE_CASE`, so earlier
    # definitions are shadowed. Identifiers look machine-mangled — verify
    # against transformers' upstream ConfigTester.

    def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase ) -> List[Any]:
        __a = parent
        __a = config_class
        __a = has_text_modality
        __a = kwargs
        __a = common_properties

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        # Every common property must exist as getter, setter, and
        # constructor kwarg on the config class.
        __a = self.config_class(**self.inputs_dict )
        __a = (
            ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['vocab_size'] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) , msg=f'''`{prop}` does not exist''' )
        # Test that config has the common properties as setter
        for idx, name in enumerate(UpperCAmelCase ):
            try:
                setattr(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
                self.parent.assertEqual(
                    getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase , UpperCAmelCase )}''' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(UpperCAmelCase ):
            try:
                __a = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase , msg=f'''`{name} value {idx} expected, but was {getattr(UpperCAmelCase , UpperCAmelCase )}''' )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        # to_json_string must reproduce every input kwarg.
        __a = self.config_class(**self.inputs_dict )
        __a = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , UpperCAmelCase )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        # to_json_file / from_json_file must round-trip the config.
        __a = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a = os.path.join(UpperCAmelCase , 'config.json' )
            config_first.to_json_file(UpperCAmelCase )
            __a = self.config_class.from_json_file(UpperCAmelCase )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        # save_pretrained / from_pretrained must round-trip the config.
        __a = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(UpperCAmelCase )
            __a = self.config_class.from_pretrained(UpperCAmelCase )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        # Same round-trip, but loading via the `subfolder=` argument.
        __a = self.config_class(**self.inputs_dict )
        __a = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a = os.path.join(UpperCAmelCase , UpperCAmelCase )
            config_first.save_pretrained(UpperCAmelCase )
            __a = self.config_class.from_pretrained(UpperCAmelCase , subfolder=UpperCAmelCase )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        # num_labels must drive id2label/label2id sizes, including updates.
        __a = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __a = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        # Non-composite configs must be constructible with no arguments.
        if self.config_class.is_composition:
            return
        __a = self.config_class()
        self.parent.assertIsNotNone(UpperCAmelCase )

    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        # Every common kwarg passed at init must be reflected on the config
        # (torch_dtype is special-cased and only checked when torch exists).
        __a = copy.deepcopy(UpperCAmelCase )
        __a = self.config_class(**UpperCAmelCase )
        __a = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
            elif getattr(UpperCAmelCase , UpperCAmelCase ) != value:
                wrong_values.append((key, getattr(UpperCAmelCase , UpperCAmelCase ), value) )
        if len(UpperCAmelCase ) > 0:
            __a = '\n'.join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
            raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )

    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        # Entry point: runs the full battery of config checks above.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 197 | 1 |
'''simple docstring'''
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =min(__snake_case ) # min() finds the minimum value
__lowercase =max(__snake_case ) # max() finds the maximum value
__lowercase =max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__lowercase =[0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(__snake_case , __snake_case ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__lowercase =0
for count in range(__snake_case ):
while holes[count] > 0:
holes[count] -= 1
__lowercase =count + min_val
i += 1
def _A ( ):
    """Demo driver: sort a sample list and print it."""
    # NOTE(review): as written this cannot run — this function shadows the
    # sort above (both are named `_A`); `pigeonhole_sort` and `__snake_case`
    # are never defined; joining ints without str() would raise TypeError;
    # and the guard below calls an undefined `main()`. Restoring it requires
    # renaming one of the two `_A` definitions — fix in coordination with
    # the sort function above.
    __lowercase =[8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(__snake_case )
    print('Sorted order is:' , ' '.join(__snake_case ) )
if __name__ == "__main__":
    main()
| 166 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_A : Optional[int] = logging.getLogger(__name__)
class a__ ( a_ ):
    """Token-classification task for CoNLL-style NER files: one token per
    line with the label in a configurable column; blank lines and
    -DOCSTART- markers separate examples."""
    # NOTE(review): the base class `a_` is undefined in this module (likely
    # `TokenClassificationTask` imported above); `__init__` assigns an
    # undefined `label_idx` to a local instead of `self.label_idx`; later
    # methods repeat the parameter name `_a` (duplicate parameter names are
    # a SyntaxError) and bind locals to `lowercase` while reading the real
    # names (`mode`, `examples`, `words`, ...); and all three methods share
    # the name `__magic_name__`, so earlier definitions are shadowed.
    # Identifiers look machine-mangled — verify against upstream utils_ner.

    def __init__( self , _a=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        lowercase : List[str] = label_idx

    def __magic_name__ ( self , _a , _a ):
        # Reads `{mode}.txt` and yields InputExample(words, labels) records;
        # unlabeled tokens (e.g. in test mode) default to "O".
        if isinstance(_a , _a ):
            lowercase : Optional[Any] = mode.value
        lowercase : List[str] = os.path.join(_a , f"""{mode}.txt""" )
        lowercase : str = 1
        lowercase : Optional[int] = []
        with open(_a , encoding="utf-8" ) as f:
            lowercase : List[Any] = []
            lowercase : Optional[int] = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) )
                        guid_index += 1
                        lowercase : int = []
                        lowercase : int = []
                else:
                    lowercase : Optional[Any] = line.split(" " )
                    words.append(splits[0] )
                    if len(_a ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                # flush the trailing example when the file has no final blank line
                examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) )
        return examples

    def __magic_name__ ( self , _a , _a , _a ):
        # Writes predictions back in CoNLL format, one "token label" per line,
        # warning when the sequence length truncated away a prediction.
        lowercase : List[str] = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(_a )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                lowercase : Any = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(_a )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )

    def __magic_name__ ( self , _a ):
        # Returns the label set from a file (one per line, "O" prepended if
        # missing) or the default CoNLL-2003 NER label set.
        if path:
            with open(_a , "r" ) as f:
                lowercase : Optional[Any] = f.read().splitlines()
            if "O" not in labels:
                lowercase : List[Any] = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class a__ ( a_ ):
    """Chunking variant of the NER task: reads the label from the
    second-to-last column and uses the CoNLL-2000 chunk label set."""
    # NOTE(review): the base class `a_` is undefined in this module and the
    # read-labels body binds to `lowercase` while reading `labels`
    # (mangled identifiers) — verify against upstream utils_ner.

    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def __magic_name__ ( self , _a ):
        # Returns the label set from a file or the default chunking labels.
        if path:
            with open(_a , "r" ) as f:
                lowercase : Tuple = f.read().splitlines()
            if "O" not in labels:
                lowercase : Optional[int] = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class a__ ( a_ ):
    """POS-tagging task over CoNLL-U files parsed with ``conllu.parse_incr``:
    words come from the `form` field, labels from `upos`."""
    # NOTE(review): the base class `a_` is undefined in this module; methods
    # repeat the parameter name `_a` (duplicate parameter names are a
    # SyntaxError), bind locals to `lowercase` while reading the real names,
    # and all share the name `__magic_name__`, so earlier definitions are
    # shadowed. Identifiers look machine-mangled — verify against upstream.

    def __magic_name__ ( self , _a , _a ):
        # Reads `{mode}.txt` as CoNLL-U and yields InputExample records.
        if isinstance(_a , _a ):
            lowercase : List[Any] = mode.value
        lowercase : Optional[int] = os.path.join(_a , f"""{mode}.txt""" )
        lowercase : Tuple = 1
        lowercase : List[str] = []
        with open(_a , encoding="utf-8" ) as f:
            for sentence in parse_incr(_a ):
                lowercase : Optional[Any] = []
                lowercase : str = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(_a ) == len(_a )
                if words:
                    examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=_a , labels=_a ) )
                    guid_index += 1
        return examples

    def __magic_name__ ( self , _a , _a , _a ):
        # Writes "form (upos|prediction)" per token, one sentence per line.
        lowercase : str = 0
        for sentence in parse_incr(_a ):
            lowercase : List[Any] = preds_list[example_id]
            lowercase : List[str] = ""
            for token in sentence:
                out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(_a )
            example_id += 1

    def __magic_name__ ( self , _a ):
        # Returns the label set from a file or the default UPOS tag set.
        if path:
            with open(_a , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 202 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# NOTE(review): all four module constants were bound to the same mangled name
# `UpperCAmelCase`, while the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES —
# restored to those names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

# Maximum input length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}
class UpperCAmelCase_(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for GPT-NeoX-20B, backed by `tokenizers`.

    NOTE(review): the base class was a mangled placeholder (restored to
    PreTrainedTokenizerFast, imported above); the four class attributes were
    all named ``snake_case__`` and the two methods shared one name — restored
    to the names the PreTrainedTokenizerFast machinery actually reads; the
    ``__init__`` parameters were duplicates (a SyntaxError) and are restored
    from the assignments in the body.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Make sure the backend pre-tokenizer honours `add_prefix_space`,
        # rebuilding it if the serialized state disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after every turn.

        Keeps only the trailing ``model_max_length`` tokens if the history is
        too long.
        """
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 54 | """simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds small RoFormer configs and inputs for the Flax tests below.

    NOTE(review): the mangled version named all ``__init__`` parameters
    identically (a SyntaxError) and bound every local to one name; the
    restored names follow the attribute assignments and the dict keys the
    body itself uses. Renamed from ``UpperCAmelCase_`` because the test
    class below instantiates ``FlaxRoFormerModelTester``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): mangled in source; upstream passes False here
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester harness for all RoFormer heads.

    NOTE(review): the mixin base and class/method names were mangled
    placeholders; restored so unittest discovery (``test_*``/``setUp``) and
    the reference to the tester class actually work.
    """

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every head from the small Chinese checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration test pinning real checkpoint outputs."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Reference logits captured from the PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 54 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_(BaseImageProcessor):
    """CLIP-style image processor: RGB-convert, shortest-edge resize,
    center-crop, rescale and normalize.

    NOTE(review): the base class was a mangled placeholder (restored to
    BaseImageProcessor, imported above); all transform methods shared one
    mangled name while ``preprocess`` calls ``self.resize`` /
    ``self.center_crop`` / ``self.rescale`` / ``self.normalize`` — restored;
    duplicate parameter names (a SyntaxError) restored from the attribute
    assignments in each body.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 2_5_5,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_2_4}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Bare `resize` resolves to the module-level transform imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with per-channel ``mean`` / ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline; every flag can be overridden per call."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 122 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ model's ``encode``: pre-quantization latents.

    NOTE(review): renamed from the mangled ``lowercase_`` — the VQ model
    below constructs ``VQEncoderOutput(latents=...)``; base restored to the
    ``BaseOutput`` imported above.
    """

    # Continuous latents produced by the encoder + quant conv.
    latents: torch.FloatTensor
class lowercase_(ModelMixin, ConfigMixin):
    """VQ-VAE: Encoder -> quant conv -> VectorQuantizer -> post-quant conv -> Decoder.

    NOTE(review): bases restored to the ``ModelMixin``/``ConfigMixin``
    imported above; ``nn.Convad`` was a mangled ``nn.Conv2d``; the three
    methods shared one mangled name while ``forward`` calls ``self.encode``
    and ``self.decode`` — restored; duplicate ``__init__`` parameter names
    (a SyntaxError) restored from the config keys used in the body.
    """

    @register_to_config
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(6_4,),
        layers_per_block=1,
        act_fn="silu",
        latent_channels=3,
        sample_size=3_2,
        num_vq_embeddings=2_5_6,
        norm_num_groups=3_2,
        vq_embed_dim=None,
        scaling_factor=0.18_215,
        norm_type="group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,  # VQ latents are not split into mean/logvar
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x, return_dict=True):
        """Encode ``x`` to (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        """Decode latents ``h``; quantizes first unless ``force_not_quantize``."""
        if not force_not_quantize:
            quant, commit_loss, _ = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        # The "spatial" norm variant conditions the decoder on the quantized latents.
        dec = self.decoder(quant_post, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict=True):
        """Full reconstruction pass: encode then decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 122 | 1 |
"""Lazy import structure for the GIT (GenerativeImage2Text) model package."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule -> public names, consumed by _LazyModule below.
# NOTE(review): the mangled version bound the dict, the modeling list and the
# _LazyModule result all to `__A` while the last line referenced the
# undefined `_import_structure` — restored to the canonical lazy-init shape.
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling classes as well.
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds tiny BlipText configs and inputs for the TF tests below.

    NOTE(review): renamed from the mangled ``__UpperCamelCase`` (the test
    class instantiates ``BlipTextModelTester``); duplicate ``__init__``
    parameter names (a SyntaxError) and collapsed locals restored from the
    attribute assignments and dict keys the bodies use.
    """

    def __init__(
        self,
        parent,
        batch_size=1_2,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        projection_dim=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_1_2,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask-as-tf-tensor)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Force each row to attend a contiguous prefix of random length.
            # NOTE(review): the subscripted assignments were mangled to bare
            # `= 1` / `= 0`; restored per the upstream Blip tester.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-test harness for TFBlipTextModel.

    NOTE(review): the mixin base and all method names were mangled
    placeholders; restored so unittest discovery (``test_*``/``setUp``)
    works. The three boolean flags below were anonymous in the mangled
    source — restored per the upstream TF Blip text test; confirm names
    against the mixin if behavior differs.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # Blip checkpoints legitimately miss some TF keys; allow them.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture used by the tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XLMProphetNet (SentencePiece-based).

    NOTE(review): the mixin base, the three class attributes (all mangled to
    ``A__``) and the method names (all ``A_``) were placeholders; restored so
    the TokenizerTesterMixin contract and unittest discovery work, and the
    undefined ``UpperCAmelCase_`` arguments restored from the upstream test.
    """

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Save a tokenizer built from the SentencePiece fixture so the common
        # tests can reload it from tmpdirname.
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '[PAD]'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '[PAD]')
        self.assertEqual(vocab_keys[1], '[CLS]')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                # -9 marks pieces that fall back to the unknown token.
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '[UNK]',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '[UNK]',
                '.',
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='microsoft/xprophetnet-large-wiki100-cased',
            revision='1acad1643ddd54a44df6a1b797ada8373685d90e',
        )
| 176 |
import random
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = [], [], []
for element in data:
if element < pivot:
less.append(UpperCamelCase_ )
elif element > pivot:
greater.append(UpperCamelCase_ )
else:
equal.append(UpperCamelCase_ )
return less, equal, greater
def quick_select(items, index):
    """Return the ``index``-th smallest element of ``items`` (0-based).

    Uses the randomized quick-select algorithm (expected O(n)); returns
    ``None`` when ``index`` is out of range. The three-way split is done
    inline so the function is self-contained.

    NOTE(review): renamed from the mangled ``_lowercase`` (the recursive
    calls reference ``quick_select``); duplicate parameter names restored.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]

    # Three-way split around the pivot; `count` is the pivot's multiplicity.
    smaller = [x for x in items if x < pivot]
    larger = [x for x in items if x > pivot]
    count = len(items) - len(smaller) - len(larger)
    m = len(smaller)

    # index lands on the pivot itself
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 176 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
    """Round-trip and behaviour tests for ``AlignProcessor``.

    The original block was machine-mangled: every local was assigned to
    ``__UpperCamelCase`` while the bodies read the real names, and every
    test method shared the name ``UpperCamelCase__`` (so only the last
    definition survived).  Names are restored here; ``np.uinta`` is fixed
    to ``np.uint8``.
    """

    def setUp(self):
        # Write a tiny WordPiece vocab and an EfficientNet image-processor
        # config into a temp dir so processors can be loaded from disk.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (converted from CHW uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 367 | from __future__ import annotations
from math import pi
def lowerCamelCase(inductance: float, frequency: float, reactance: float) -> dict:
    """Solve for the single zero-valued quantity among inductance (henries),
    frequency (hertz) and inductive reactance (ohms) via X_L = 2*pi*f*L.

    Exactly one argument must be 0; that quantity is computed from the other
    two and returned in a one-entry dict keyed by its name.  (Fixes the
    mangled original, whose three parameters shared one name.)

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if inductance < 0:
        raise ValueError('Inductance cannot be negative')
    if frequency < 0:
        raise ValueError('Frequency cannot be negative')
    if reactance < 0:
        raise ValueError('Inductive reactance cannot be negative')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 105 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config locations for released PEGASUS checkpoints.
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class A(PretrainedConfig):
    """Configuration for a PEGASUS encoder-decoder model.

    Defaults reproduce ``google/pegasus-large``.  (Fixes the mangled
    original: the class inherited an undefined ``lowercase_``, every
    ``__init__`` parameter shared one name, and all attributes were
    assigned to ``A__`` instead of ``self``.)
    """

    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    # Map the generic names used by PretrainedConfig onto PEGASUS-specific ones.
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 7 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}


class lowerCamelCase(PretrainedConfig):
    """Configuration for a CamemBERT model (RoBERTa-style, BERT defaults).

    (Fixes the mangled original: the undefined ``lowercase_`` base class,
    duplicated ``__init__`` parameter names, and attributes assigned to
    ``A__`` instead of ``self``.)
    """

    model_type = 'camembert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT (renamed from the mangled
    duplicate ``lowerCamelCase`` that shadowed the config class above)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra 'choice' axis between batch and sequence
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 134 | 0 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: max revenue from cutting a rod of length *n*
    given *prices* (prices[i-1] = price of a piece of length i).
    Exponential time; kept for comparison with the memoized variants.
    (All functions in this file were mangled to share the name
    ``snake_case_``; their original cross-referenced names are restored.)
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Top-down (memoized) rod cutting; O(n^2) time, O(n) extra space."""
    _enforce_args(n, prices)
    # max_rev[i] caches the best revenue for length i; -inf marks "unknown".
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for top_down_cut_rod; fills the max_rev memo table."""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev)
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up dynamic programming rod cutting; O(n^2) time."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate shared preconditions: n >= 0 and a price for every length."""
    if n < 0:
        raise ValueError(f'n must be greater than or equal to 0. Got n = {n}')
    if n > len(prices):
        raise ValueError(
            'Each integral piece of rod must have a corresponding price. '
            f'Got n = {n} but length of prices = {len(prices)}'
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 179 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_model_doc_toc(model_doc):
    """De-duplicate and alphabetically sort one modality's model-doc list.

    Entries sharing a ``local`` key are merged (their titles must agree);
    the result is sorted case-insensitively by title.  (Restores the real
    names from the mangled original, where both functions were called
    ``snake_case_`` and locals were assigned to ``_a``.)
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.'
            )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    """Verify (and optionally rewrite) the Models section of the doc TOC."""
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']

    # Only the per-modality sub-sections need cleaning.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['sections'] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 179 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class _UpperCAmelCase(PreTrainedTokenizer):
    """Character-level tokenizer backed by a JSON vocab file.

    (Fixes the mangled original: the four module constants above all shared
    one name, the class inherited an undefined ``__snake_case``, and every
    method was called ``SCREAMING_SNAKE_CASE`` so later defs shadowed
    earlier ones.)
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # reverse mapping: id -> token
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split text into individual characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab JSON into *save_directory*; returns the file path tuple."""
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
| 102 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU smoke tests for ``ShapEPipeline`` built from tiny random
    sub-models.

    NOTE(review): identifiers in the mangled original were reconstructed
    from the bodies (which still referenced the real names, e.g.
    ``self.dummy_prior``); the undefined base ``__snake_case`` is taken to
    be ``PipelineTesterMixin`` imported above — confirm against upstream.
    """

    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    # NOTE(review): the name of this boolean class flag was lost in mangling;
    # presumably `test_gpu_offload` as in the upstream test — confirm.
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full set of tiny pipeline components."""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp',
            num_train_timesteps=1024,
            prediction_type='sample',
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released openai/shap-e weights.

    (Renamed from the mangled duplicate ``_UpperCAmelCase`` that shadowed
    the fast-test class above; method names restored so unittest discovers
    them.)
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy'
        )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark',
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type='np',
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 102 | 1 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True when the two strings are anagrams of each other,
    ignoring case and whitespace.

    (Fixes the mangled original: duplicated parameter names, garbled
    string literals in ``replace``, ``defaultdict(__lowerCamelCase)``, and
    the function name — the main guard below already called
    ``check_anagrams``.  The ``-> int`` annotation is corrected to bool.)
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()
    status = check_anagrams(input_a, input_b)
    print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 360 | '''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return *x* unchanged if it is already iterable, else the pair (x, x).

    (Fixes the mangled original, which returned the undefined name ``x``;
    the test class below calls this helper as ``to_atuple``.)
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class __magic_name__ :
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any , lowercase_ : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : str ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float ):
lowercase_ : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(lowercase_ , lowercase_ , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple=None , **lowercase_ : Optional[int] ):
lowercase_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
lowercase_ : Any = FlaxVisionTextDualEncoderModel(lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[Any]=None , **lowercase_ : Tuple ):
lowercase_ , lowercase_ : Any = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : List[Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=None , **lowercase_ : int ):
lowercase_ , lowercase_ : Union[str, Any] = self.get_vision_text_model(lowercase_ , lowercase_ )
lowercase_ : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
lowercase_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
lowercase_ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
lowercase_ : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
lowercase_ : List[str] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
lowercase_ : Union[str, Any] = after_output[0]
lowercase_ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1E-3 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict=None , **lowercase_ : Optional[Any] ):
    # Checks the shapes of the vision/text attention maps returned when the
    # model is called with output_attentions.
    # NOTE(review): duplicate `lowercase_` parameters (SyntaxError); the body
    # reads unbound names (`vision_model`, `vision_config`, `text_config`,
    # `output`, `input_ids`, `vision_attentions`, `text_attentions`,
    # `image_size`, `patch_size`, `num_patches`, `seq_len`) -- the original
    # identifiers were clobbered by an automated rename.
    lowercase_ , lowercase_ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ )
    lowercase_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
    lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
    lowercase_ : Optional[int] = model(
        input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
    lowercase_ : Tuple = output.vision_model_output.attentions
    self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers )
    # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
    lowercase_ : List[str] = to_atuple(vision_model.config.image_size )
    lowercase_ : Optional[Any] = to_atuple(vision_model.config.patch_size )
    lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    lowercase_ : Optional[Any] = num_patches + 1
    self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
    lowercase_ : Union[str, Any] = output.text_model_output.attentions
    self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers )
    self.assertEqual(
        text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int ):
    # Cross-framework equivalence: runs the same inputs through a PyTorch and a
    # Flax model, then re-checks after saving/loading in both directions
    # (PT -> Flax and Flax -> PT), asserting outputs agree to within 4e-2.
    # NOTE(review): duplicate `lowercase_` parameters (SyntaxError); the body
    # reads unbound names (`pt_model`, `fx_model`, `inputs_dict`, `flax_inputs`,
    # `fx_outputs`, `pt_outputs`, `fx_model_loaded`, `fx_outputs_loaded`,
    # `pt_model_loaded`, `pt_outputs_loaded`) -- the original parameters were
    # presumably (fx_model, pt_model, inputs_dict) before an automated rename.
    pt_model.to(lowercase_ )
    pt_model.eval()
    # prepare inputs
    lowercase_ : int = inputs_dict
    lowercase_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
    with torch.no_grad():
        lowercase_ : str = pt_model(**lowercase_ ).to_tuple()
    lowercase_ : Optional[Any] = fx_model(**lowercase_ ).to_tuple()
    self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
    for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
        self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
    # PT -> Flax
    with tempfile.TemporaryDirectory() as tmpdirname:
        pt_model.save_pretrained(lowercase_ )
        lowercase_ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
    lowercase_ : Dict = fx_model_loaded(**lowercase_ ).to_tuple()
    self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
    for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
        self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4E-2 )
    # Flax -> PT
    with tempfile.TemporaryDirectory() as tmpdirname:
        fx_model.save_pretrained(lowercase_ )
        lowercase_ : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ )
    pt_model_loaded.to(lowercase_ )
    pt_model_loaded.eval()
    with torch.no_grad():
        lowercase_ : List[Any] = pt_model_loaded(**lowercase_ ).to_tuple()
    self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
    for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
        self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Union[str, Any] ):
    # PT -> Flax direction: convert the PyTorch state dict to Flax params and
    # run the equivalence check.
    # NOTE(review): duplicate `lowercase_` parameters (SyntaxError); `pt_model`
    # and `fx_state` are read but never bound under those names.
    lowercase_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
    lowercase_ : List[Any] = VisionTextDualEncoderModel(lowercase_ )
    lowercase_ : Union[str, Any] = FlaxVisionTextDualEncoderModel(lowercase_ )
    lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
    lowercase_ : Tuple = fx_state
    self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] ):
    # Flax -> PT direction: load the Flax weights into the PyTorch model and
    # run the equivalence check.
    # NOTE(review): duplicate `lowercase_` parameters (SyntaxError); `fx_model`
    # is read but never bound under that name.
    lowercase_ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
    lowercase_ : int = VisionTextDualEncoderModel(lowercase_ )
    lowercase_ : Dict = FlaxVisionTextDualEncoderModel(lowercase_ )
    lowercase_ : Optional[Any] = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
    self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
    """Prepare config/inputs and run the from-pretrained-configs check."""
    prepared = self.prepare_config_and_inputs()
    self.check_model_from_pretrained_configs(**prepared)
def SCREAMING_SNAKE_CASE_ ( self : str ):
    """Prepare config/inputs and run the dual-encoder from-pretrained check."""
    prepared = self.prepare_config_and_inputs()
    self.check_vision_text_dual_encoder_from_pretrained(**prepared)
def SCREAMING_SNAKE_CASE_ ( self : Any ):
    """Prepare config/inputs and run the save/load round-trip check."""
    prepared = self.prepare_config_and_inputs()
    self.check_save_load(**prepared)
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
    """Prepare config/inputs and run the attention-output shape check."""
    prepared = self.prepare_config_and_inputs()
    self.check_vision_text_output_attention(**prepared)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE_ ( self : Any ):
    # PT<->Flax equivalence entry point: pops the configs out of the prepared
    # dict and runs the round-trip checks in both directions.
    # NOTE(review): the body reads `config_inputs_dict`, but the prepared dict
    # was assigned to `lowercase_` -- an automated rename clobbered the name.
    lowercase_ : Tuple = self.prepare_config_and_inputs()
    lowercase_ : List[Any] = config_inputs_dict.pop("""vision_config""" )
    lowercase_ : int = config_inputs_dict.pop("""text_config""" )
    lowercase_ : Optional[int] = config_inputs_dict
    self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ )
    self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
    # Slow save/load round-trip on a real pretrained model pair: outputs before
    # and after the round trip must match to within 1e-5.
    # NOTE(review): `model_a`, `inputs`, `outputs`, `after_outputs` are read but
    # never bound, and `out_a - out_a` compares a value against itself (always
    # zero) -- the original `out_1`/`out_2`-style names were collapsed by an
    # automated rename.
    lowercase_ , lowercase_ : str = self.get_pretrained_model_and_inputs()
    lowercase_ : Dict = model_a(**lowercase_ )
    lowercase_ : str = outputs[0]
    with tempfile.TemporaryDirectory() as tmp_dirname:
        model_a.save_pretrained(lowercase_ )
        lowercase_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
        lowercase_ : str = model_a(**lowercase_ )
        lowercase_ : Union[str, Any] = after_outputs[0]
        lowercase_ : Any = np.amax(np.abs(out_a - out_a ) )
        self.assertLessEqual(lowercase_ , 1E-5 )
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """ViT + BERT flavour of the Flax dual-encoder test mixin.

    NOTE(review): the second method repeats the parameter name `lowercase_`
    (a SyntaxError), and the first and third methods read names that are never
    bound (`model`, `batch_size`, `pixel_values`, `input_ids`,
    `attention_mask`, `vit_model_tester`, `bert_model_tester`,
    `vision_config_and_inputs`, `text_config_and_inputs`, ...) -- an automated
    rename collapsed the original local names into `lowercase_`.
    """

    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        # Load a tiny ViT + BERT pair and build random pixel/text inputs.
        lowercase_ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
        lowercase_ : List[str] = 13
        lowercase_ : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowercase_ : Any = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        lowercase_ : str = random_attention_mask([batch_size, 4] )
        lowercase_ : List[str] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple ):
        # Instantiate the Flax ViT / BERT sub-models from their configs.
        lowercase_ : Union[str, Any] = FlaxViTModel(lowercase_ )
        lowercase_ : Dict = FlaxBertModel(lowercase_ )
        return vision_model, text_model

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Assemble configs and dummy inputs from the ViT and BERT model testers.
        lowercase_ : Any = FlaxViTModelTester(self )
        lowercase_ : Optional[Any] = FlaxBertModelTester(self )
        lowercase_ : Dict = vit_model_tester.prepare_config_and_inputs()
        lowercase_ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        lowercase_ , lowercase_ : List[str] = vision_config_and_inputs
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
    """CLIP-vision + BERT flavour of the Flax dual-encoder test mixin.

    NOTE(review): the second method repeats the parameter name `lowercase_`
    (a SyntaxError), and the first and third methods read names that are never
    bound (`model`, `batch_size`, `pixel_values`, `input_ids`,
    `attention_mask`, `clip_model_tester`, `bert_model_tester`,
    `vision_config_and_inputs`, `text_config_and_inputs`) -- an automated
    rename collapsed the original local names into `lowercase_`.
    """

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # Load a tiny CLIP + BERT pair and build random pixel/text inputs.
        lowercase_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
        lowercase_ : List[str] = 13
        lowercase_ : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        lowercase_ : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        lowercase_ : Tuple = random_attention_mask([batch_size, 4] )
        lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ):
        # Instantiate the Flax CLIP-vision / BERT sub-models from their configs.
        lowercase_ : Tuple = FlaxCLIPVisionModel(lowercase_ )
        lowercase_ : Any = FlaxBertModel(lowercase_ )
        return vision_model, text_model

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        # Assemble configs and dummy inputs from the CLIP-vision and BERT testers.
        lowercase_ : Union[str, Any] = FlaxCLIPVisionModelTester(self )
        lowercase_ : Tuple = FlaxBertModelTester(self )
        lowercase_ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
        lowercase_ : Any = bert_model_tester.prepare_config_and_inputs()
        lowercase_ , lowercase_ : Optional[Any] = vision_config_and_inputs
        lowercase_ , lowercase_ , lowercase_ , lowercase_ : str = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __magic_name__ ( unittest.TestCase):
    """Slow integration test against the public clip-italian checkpoint.

    NOTE(review): the body reads `model`, `processor` and `outputs`, but every
    assignment targets `lowercase_` -- an automated rename clobbered the
    original local names, so the test cannot run as written.
    """

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        lowercase_ : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
        lowercase_ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        lowercase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        lowercase_ : Optional[int] = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" )
        lowercase_ : List[str] = model(**lowercase_ )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # expected values recorded from a reference run (underscores are digit
        # separators: [[1.2284727, 0.3104122]])
        lowercase_ : Optional[Any] = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1E-3 ) )
| 21 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[str]:
return np.dot(_lowerCamelCase , _lowerCamelCase )
class __snake_case :
    """Support-vector classifier solved via Wolfe's dual with scipy.optimize.

    NOTE(review): this class appears to have been mangled by an automated
    rename: `__init__` repeats the keyword-only parameter name `snake_case`
    (a SyntaxError), all four other methods share the single name
    `_SCREAMING_SNAKE_CASE` (each def shadows the previous one), and several
    bodies read names that are never bound (`regularization`, `gamma`,
    `kernel`, `__a`, `observations`, `classes`, `candidate`, `l_star`, `n`,
    `s`, `norm_squared`, `vectora`, `ly_contraint`). Restoring it requires the
    original identifiers (presumably __linear/__rbf/fit/predict).
    """

    def __init__( self ,*,
        snake_case = np.inf ,snake_case = "linear" ,snake_case = 0.0 ,):
        """Configure regularization constant, kernel choice, and gamma.

        NOTE(review): duplicate keyword-only parameter names -> SyntaxError.
        """
        lowercase : str = regularization
        lowercase : Union[str, Any] = gamma
        if kernel == "linear":
            # NOTE(review): `self.__linear` name-mangles to
            # `self._snake_case__linear`, which is never defined here.
            lowercase : Any = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("""rbf kernel requires gamma""" )
            if not isinstance(self.gamma ,(float, int) ):
                raise ValueError("""gamma must be float or int""" )
            if not self.gamma > 0:
                raise ValueError("""gamma must be > 0""" )
            lowercase : Optional[Any] = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            lowercase : Union[str, Any] = f"Unknown kernel: {kernel}"
            # NOTE(review): `__a` is unbound -- the message built on the
            # previous line was presumably what should be raised.
            raise ValueError(__a )

    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
        """Linear kernel: plain dot product of the two vectors.

        NOTE(review): duplicate parameter names (SyntaxError); the body reads
        `__a`, which is unbound.
        """
        return np.dot(__a ,__a )

    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
        """RBF kernel: exp(-gamma * ||v1 - v2||^2).

        NOTE(review): duplicate parameter names (SyntaxError); `norm_squared`
        is not defined at module scope (the helper above is `_snake_case`),
        and `vectora - vectora` subtracts a value from itself.
        """
        return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )

    def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
        """Fit: maximize Wolfe's dual subject to 0 <= l <= C and sum(l*y) = 0,
        then derive the offset from the mean margin residual.

        NOTE(review): duplicate parameter names (SyntaxError); `observations`,
        `classes`, `l_star`, `s`, `n` are read but never bound, and
        `(lowercase ) : str = np.shape(__a )` keeps the whole shape tuple
        instead of unpacking the sample count.
        """
        lowercase : Optional[Any] = observations
        lowercase : Optional[int] = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (lowercase ) : str = np.shape(__a )

        def to_minimize(snake_case ) -> float:
            # Negated dual objective: 1/2 * sum_nm(l_n l_m y_n y_m K(x_n, x_m)) - sum_n(l_n)
            lowercase : Tuple = 0
            (lowercase ) : Tuple = np.shape(__a )
            for i in range(__a ):
                for j in range(__a ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] ,observations[j] )
                    )
            return 1 / 2 * s - sum(__a )

        lowercase : str = LinearConstraint(__a ,0 ,0 )
        lowercase : int = Bounds(0 ,self.regularization )
        lowercase : int = minimize(
            __a ,np.ones(__a ) ,bounds=__a ,constraints=[ly_contraint] ).x
        lowercase : Any = l_star
        # calculating mean offset of separation plane to points
        lowercase : int = 0
        for i in range(__a ):
            for j in range(__a ):
                # NOTE(review): without parentheses this evaluates as
                # classes[i] - (classes[i] * optimum[i] * kernel(...)).
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] ,observations[j] )
        lowercase : Any = s / n

    def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
        """Predict: sign of the kernelized decision function plus the offset.

        NOTE(review): the sum is assigned to `lowercase` but read back as `s`.
        """
        lowercase : Optional[Any] = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] ,__a )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 20 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCAmelCase_ :
    """CLIP-style preprocessor: tokenizes text with CLIPTokenizerFast and
    preprocesses images with torchvision resize / center-crop / normalize,
    moving the tokenized tensors to `self.device`.

    NOTE(review): `__init__` and `__call__` repeat the parameter name `__a`
    (a SyntaxError), and the bodies read `device`, `images` and `encoding`
    names that are never bound -- an automated rename clobbered the original
    parameter/local names.
    """

    def __init__( self, __a = "cpu", __a = "openai/clip-vit-large-patch14"):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = device
        _lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(__a)
        # Per-channel mean/std used for normalization (underscores are digit
        # separators, e.g. 0.48_145_466 == 0.48145466).
        _lowerCAmelCase : Any = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        _lowerCAmelCase : Union[str, Any] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        _lowerCAmelCase : Tuple = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        _lowerCAmelCase : Optional[int] = torchvision.transforms.Resize(224)
        _lowerCAmelCase : Dict = torchvision.transforms.CenterCrop(224)

    def snake_case__ ( self, __a):
        """Resize, center-crop and normalize a batch of images."""
        _lowerCAmelCase : Optional[Any] = self.resize(__a)
        _lowerCAmelCase : List[str] = self.center_crop(__a)
        _lowerCAmelCase : Optional[Any] = self.normalize(__a)
        return images

    def __call__( self, __a=None, __a=None, **__a):
        """Tokenize text, preprocess images, and move tensors to the device."""
        _lowerCAmelCase : str = self.tokenizer(text=__a, **__a)
        _lowerCAmelCase : List[str] = self.preprocess_img(__a)
        _lowerCAmelCase : Tuple = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class UpperCAmelCase_ ( nn.Module):
    """VQGAN+CLIP image editor: optimizes an additive offset to a VQGAN latent
    so the decoded image maximizes CLIP similarity with positive text prompts
    (and minimizes similarity with negative ones), optionally logging to wandb.

    NOTE(review): several methods repeat the parameter name `__a` (a
    SyntaxError in Python), and many bodies read names that are never bound
    (`device`, `vqgan`, `clip`, `iterations`, `lr`, `total_duration`, `paths`,
    `images`, `path`, `img`, `base_latent`, `transform_vector`,
    `trans_latent`, `clip_outputs`, `similarity_logits`, `weights`,
    `pos_prompts`, `neg_prompts`, `loss`, `vector`, `optim`, `clip_loss`,
    `transformed_img`, `image_path`, `image`, `prompts`, `prompt`,
    `processed_prompts`, `save_path`, ...). `ProcessorGradientFlow` is also
    not imported/defined in this file. An automated rename collapsed the
    original identifiers into `_lowerCAmelCase`/`__a`; flagged for restore.
    """

    def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ):
        '''simple docstring'''
        super().__init__()
        _lowerCAmelCase : List[str] = None
        _lowerCAmelCase : List[str] = device if device else get_device()
        # Use the provided VQGAN/CLIP models or load defaults.
        if vqgan:
            _lowerCAmelCase : Union[str, Any] = vqgan
        else:
            _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a)
        self.vqgan.eval()
        if clip:
            _lowerCAmelCase : str = clip
        else:
            _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device)
        _lowerCAmelCase : Any = iterations
        _lowerCAmelCase : List[Any] = lr
        _lowerCAmelCase : Tuple = log
        _lowerCAmelCase : List[str] = make_grid
        _lowerCAmelCase : int = return_val
        _lowerCAmelCase : Dict = quantize
        _lowerCAmelCase : Any = self.vqgan.decoder.z_shape

    def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True):
        """Assemble the intermediate PNGs under the input path into a GIF."""
        _lowerCAmelCase : Union[str, Any] = []
        if output_path is None:
            _lowerCAmelCase : List[Any] = "./animation.gif"
        if input_path is None:
            _lowerCAmelCase : str = self.save_path
        _lowerCAmelCase : str = sorted(glob(input_path + "/*"))
        if not len(__a):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(__a) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        _lowerCAmelCase : Optional[int] = total_duration / len(__a)
        _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a)
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            _lowerCAmelCase : Any = 1.5
            _lowerCAmelCase : List[str] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(__a))
        imageio.mimsave(__a, __a, duration=__a)
        print(f"gif saved to {output_path}")

    def snake_case__ ( self, __a=None, __a=None):
        """Encode an image file into the VQGAN latent space (tensor input TODO)."""
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device)
        _lowerCAmelCase : Dict = preprocess_vqgan(__a)
        _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a)
        return z

    def snake_case__ ( self, __a):
        """Decode self.latent shifted by a transform vector (optionally quantized)."""
        _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_()
        _lowerCAmelCase : Dict = base_latent + transform_vector
        if self.quantize:
            _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a)
        else:
            _lowerCAmelCase : Any = trans_latent
        return self.vqgan.decode(__a)

    def snake_case__ ( self, __a, __a, __a=None):
        """Sum of (optionally weighted) CLIP image/text similarity logits."""
        _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a)
        _lowerCAmelCase : Optional[int] = self.clip(**__a)
        _lowerCAmelCase : Any = clip_outputs.logits_per_image
        if weights is not None:
            _lowerCAmelCase : Tuple = similarity_logits * weights
        return similarity_logits.sum()

    def snake_case__ ( self, __a, __a, __a):
        """Loss = -log(positive similarity) + log(negative similarity)."""
        _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"])
        else:
            _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device)
        _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a)
        return loss

    def snake_case__ ( self, __a, __a, __a):
        """Generator: Adam-optimize a random latent offset against the CLIP loss,
        yielding the decoded image (or the raw vector) after each step."""
        _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device)
        _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            _lowerCAmelCase : Any = self._add_vector(__a)
            _lowerCAmelCase : Optional[Any] = loop_post_process(__a)
            _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a)
            print("CLIP loss", __a)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=__a)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def snake_case__ ( self, __a, __a, __a):
        """Initialize a wandb run and log the prompts and original image."""
        wandb.init(reinit=__a, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            _lowerCAmelCase : str = Image.open(__a)
            _lowerCAmelCase : int = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(__a))

    def snake_case__ ( self, __a):
        """Normalize prompts ("a|b" string, (text, weight) pairs, "text:weight")
        into {"prompts": [...], "weights": tensor}."""
        if not prompts:
            return []
        _lowerCAmelCase : int = []
        _lowerCAmelCase : List[str] = []
        if isinstance(__a, __a):
            _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(__a, (tuple, list)):
                _lowerCAmelCase : Optional[Any] = prompt[0]
                _lowerCAmelCase : Union[str, Any] = float(prompt[1])
            elif ":" in prompt:
                _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":")
                _lowerCAmelCase : Optional[Any] = float(__a)
            else:
                _lowerCAmelCase : Optional[int] = prompt
                _lowerCAmelCase : List[Any] = 1.0
            processed_prompts.append(__a)
            weights.append(__a)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(__a, device=self.device),
        }

    def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ):
        """End-to-end generation: init latent, process prompts, optimize, and
        show/save the intermediate and final images."""
        if image_path:
            _lowerCAmelCase : List[Any] = self._get_latent(__a)
        else:
            _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(__a, __a, __a)
        assert pos_prompts, "You must provide at least one positive prompt."
        _lowerCAmelCase : int = self.process_prompts(__a)
        _lowerCAmelCase : List[str] = self.process_prompts(__a)
        if save_final and save_path is None:
            _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(__a):
                os.makedirs(__a)
            else:
                # Path already exists: disambiguate with a timestamp suffix.
                _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp()
                os.makedirs(__a)
            _lowerCAmelCase : Tuple = save_path
        _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(__a))
        _lowerCAmelCase : int = loop_post_process(__a)
        for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)):
            if show_intermediate:
                show_pil(__a)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(__a)})
        if show_final:
            show_pil(__a)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger shared by this file.
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( A_ ):
    """Image processor pipeline: resize -> center-crop -> rescale -> normalize,
    defaulting to ImageNet mean/std and a 224 shortest-edge / 224x224 crop.

    NOTE(review): an automated rename damaged this class: `__init__` and every
    `_a` method repeat their parameter names (`A_`), which is a SyntaxError;
    the five processing methods all share the single name `_a` (each def
    shadows the previous one -- presumably they were
    resize/center_crop/rescale/normalize/preprocess); and the bodies read
    names (`size`, `crop_size`, `do_resize`, `images`, ...) that are never
    bound under those names.
    """

    # Names of the model inputs this processor produces.
    UpperCAmelCase__ : List[str] = ["pixel_values"]

    def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = IMAGENET_DEFAULT_MEAN , A_ = IMAGENET_DEFAULT_STD , **A_ , ) -> None:
        super().__init__(**A_ )
        __UpperCamelCase =size if size is not None else {'shortest_edge': 224}
        __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
        __UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
        __UpperCamelCase =get_size_dict(A_ , param_name='crop_size' )
        __UpperCamelCase =do_resize
        __UpperCamelCase =size
        __UpperCamelCase =resample
        __UpperCamelCase =do_center_crop
        __UpperCamelCase =crop_size
        __UpperCamelCase =do_rescale
        __UpperCamelCase =rescale_factor
        __UpperCamelCase =do_normalize
        __UpperCamelCase =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        __UpperCamelCase =image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
        # Resize; a "shortest_edge" size is scaled by 256/224 before computing
        # the output (height, width).
        __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            __UpperCamelCase =int((256 / 224) * size['shortest_edge'] )
            __UpperCamelCase =get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
            __UpperCamelCase ={'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
        return resize(
            A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )

    def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
        # Center-crop to (height, width).
        __UpperCamelCase =get_size_dict(A_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )

    def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
        # Rescale pixel values by a scalar factor (e.g. 1/255).
        return rescale(A_ , scale=A_ , data_format=A_ , **A_ )

    def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
        # Normalize per channel with the given mean/std.
        return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )

    def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> BatchFeature:
        # Full preprocessing pipeline: resolve flags against the instance
        # defaults, validate, then apply resize/crop/rescale/normalize and
        # convert to the requested channel-dimension format.
        __UpperCamelCase =do_resize if do_resize is not None else self.do_resize
        __UpperCamelCase =resample if resample is not None else self.resample
        __UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
        __UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
        __UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
        __UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize
        __UpperCamelCase =image_mean if image_mean is not None else self.image_mean
        __UpperCamelCase =image_std if image_std is not None else self.image_std
        __UpperCamelCase =size if size is not None else self.size
        __UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
        __UpperCamelCase =crop_size if crop_size is not None else self.crop_size
        __UpperCamelCase =get_size_dict(A_ , param_name='crop_size' )
        __UpperCamelCase =make_list_of_images(A_ )
        if not valid_images(A_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        __UpperCamelCase =[to_numpy_array(A_ ) for image in images]
        if do_resize:
            __UpperCamelCase =[self.resize(A_ , A_ , A_ ) for image in images]
        if do_center_crop:
            __UpperCamelCase =[self.center_crop(A_ , A_ ) for image in images]
        if do_rescale:
            __UpperCamelCase =[self.rescale(A_ , A_ ) for image in images]
        if do_normalize:
            __UpperCamelCase =[self.normalize(A_ , A_ , A_ ) for image in images]
        __UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
        __UpperCamelCase ={'pixel_values': images}
        return BatchFeature(data=A_ , tensor_type=A_ )
| 117 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger shared by this file.
_A = logging.get_logger(__name__)

# PIL is an optional dependency; import it only when vision support is present.
if is_vision_available():
    import PIL
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = None , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , ) -> None:
super().__init__(**A_ )
__UpperCamelCase =size if size is not None else {'shortest_edge': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
__UpperCamelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ , param_name='crop_size' )
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =resample
__UpperCamelCase =do_center_crop
__UpperCamelCase =crop_size
__UpperCamelCase =do_rescale
__UpperCamelCase =rescale_factor
__UpperCamelCase =do_normalize
__UpperCamelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCamelCase =image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCamelCase =do_convert_rgb
def _a ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
__UpperCamelCase =get_resize_output_image_size(A_ , size=size['shortest_edge'] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
__UpperCamelCase =get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ = None , **A_ , ) -> Union[str, Any]:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ , A_ , A_ = None , **A_ , ) -> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def _a ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , ) -> PIL.Image.Image:
__UpperCamelCase =do_resize if do_resize is not None else self.do_resize
__UpperCamelCase =size if size is not None else self.size
__UpperCamelCase =get_size_dict(A_ , param_name='size' , default_to_square=A_ )
__UpperCamelCase =resample if resample is not None else self.resample
__UpperCamelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase =crop_size if crop_size is not None else self.crop_size
__UpperCamelCase =get_size_dict(A_ , param_name='crop_size' , default_to_square=A_ )
__UpperCamelCase =do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase =do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase =image_mean if image_mean is not None else self.image_mean
__UpperCamelCase =image_std if image_std is not None else self.image_std
__UpperCamelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCamelCase =make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCamelCase =[convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
__UpperCamelCase =[to_numpy_array(A_ ) for image in images]
if do_resize:
__UpperCamelCase =[self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
__UpperCamelCase =[self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
__UpperCamelCase =[self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
__UpperCamelCase =[self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
__UpperCamelCase =[to_channel_dimension_format(A_ , A_ ) for image in images]
__UpperCamelCase ={'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 117 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.