| code (string, lengths 81–54k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from maths.prime_check import is_prime
def twin_prime(number: int):
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
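    # Quick usage sketch (an editorial addition, assuming `maths.prime_check.is_prime`
    # is a standard primality test).
    assert twin_prime(5) == 7   # (5, 7) are twin primes, so the companion 7 is returned
    assert twin_prime(4) == -1  # 4 is not prime
    assert twin_prime(7) == -1  # 9 is not prime, so (7, 9) is not a twin-prime pair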
| 649 |
from torch import nn
class ClassificationHead(nn.Module):
    """Single-layer head that maps an embedding to class logits."""

    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
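# Minimal forward-pass sketch (an editorial addition; the sizes are arbitrary
# illustrations, not from the original source).
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=4, embed_size=32)
    hidden = torch.randn(2, 32)  # a batch of two 32-dim embeddings
    print(head(hidden).shape)    # torch.Size([2, 4])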
| 649 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )
if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 649 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path: str, tgt_path: str, save_path: str = None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
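    # Usage sketch (an editorial addition): `fire` maps CLI args onto the function's
    # parameters, e.g.
    #   python rouge_cli.py preds.txt targets.txt --save_path=metrics.json
    # (file names here are placeholders, not from the original source).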
| 649 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 649 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
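# Hedged sketch of how the command gets wired up (an editorial addition): the real
# consumer is the `transformers-cli` entry point; this standalone driver is
# illustrative only, and the model name is a placeholder.
if __name__ == "__main__":
    parser = ArgumentParser("transformers-cli")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    DownloadCommand.register_subcommand(commands_parser)
    args = parser.parse_args(["download", "bert-base-uncased"])
    args.func(args).run()  # builds a DownloadCommand, then fetches model + tokenizer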
| 649 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
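    # Usage sketch (an editorial addition): the cheapest E -> F route above is
    # E -> G -> F with total weight 3, so the bidirectional search should print 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))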
| 649 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 649 | 1 |
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 649 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler in the style of score_sde_pytorch."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
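# Minimal driver sketch for the scheduler above (an editorial addition; the
# constant `-x` stands in for a learned score network and is purely illustrative).
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
    scheduler.set_timesteps(num_inference_steps=10)
    x = torch.randn(1, 3, 8, 8)  # start from pure noise
    for t in scheduler.timesteps:
        score = -x  # stand-in for model(x, t)
        x, x_mean = scheduler.step_pred(score, x, t)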
| 649 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 649 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
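# Net effect exercised above (a hedged summary of the assertions, not a spec):
# with `trim_offsets=True` the whitespace preceding a word is excluded from
# that word's offset pair (the second token starts at len(token) + 1), while
# with `trim_offsets=False` the offsets begin at the preceding space itself.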
| 649
| 1
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Union[str, "sqlalchemy.sql.Selectable"] , __a : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __a : Optional[Features] = None , __a : str = None , __a : bool = False , **__a : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(features=__a , cache_dir=__a , keep_in_memory=__a , **__a )
__lowercase : Tuple = Sql(
cache_dir=__a , features=__a , sql=__a , con=__a , **__a , )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase : str = None
__lowercase : Any = None
__lowercase : List[Any] = None
__lowercase : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , )
# Build dataset for splits
__lowercase : Dict = self.builder.as_dataset(
split="""train""" , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Dataset , __a : str , __a : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __a : Optional[int] = None , __a : Optional[int] = None , **__a : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
__lowercase : List[str] = dataset
__lowercase : Tuple = name
__lowercase : Any = con
__lowercase : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowercase : str = num_proc
__lowercase : List[Any] = to_sql_kwargs
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase : str = self.to_sql_kwargs.pop("""sql""" , __a )
__lowercase : Optional[Any] = self.to_sql_kwargs.pop("""con""" , __a )
__lowercase : int = self.to_sql_kwargs.pop("""index""" , __a )
__lowercase : List[Any] = self._write(index=__a , **self.to_sql_kwargs )
return written
def lowerCAmelCase ( self : str , __a : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase : Optional[int] = args
__lowercase : Tuple = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
__lowercase : Tuple = query_table(
table=self.dataset.data , key=slice(__a , offset + self.batch_size ) , indices=self.dataset._indices , )
__lowercase : int = batch.to_pandas()
__lowercase : str = df.to_sql(self.name , self.con , index=__a , **__a )
return num_rows or len(__a )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , **__a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : List[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__lowercase , __lowercase : Optional[Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __a , __a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
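def _sql_roundtrip_sketch():
    # A minimal usage sketch of the public `datasets` API backed by the reader
    # and writer above. The SQLite URI and table name are illustrative, and
    # `sqlalchemy` is assumed to be installed.
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_sql("demo_table", "sqlite:///demo.db")  # goes through the writer above
    return Dataset.from_sql("demo_table", "sqlite:///demo.db")  # goes through the reader above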
| 649
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
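    # The `num_attention_heads / 2` in the shape checks reflects ConvBERT's
    # mixed attention: with the default `head_ratio=2`, half of the heads are
    # served by the span-based convolution branch, so the attention maps expose
    # only half the configured heads (stated here as an assumption about the
    # upstream config).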
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 649
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
lowerCamelCase : Any = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
lowerCamelCase : Any = '''</w>'''
lowerCamelCase : Optional[int] = '''@@ '''
def snake_case_ ( lowerCAmelCase_ : Dict ):
__lowercase : Optional[Any] = set()
__lowercase : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase : int = char
return pairs
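# Illustration only: get_pairs(("l", "o", "w", BPE_TOKEN_MERGES)) yields the
# candidate merge pairs {("l", "o"), ("o", "w"), ("w", BPE_TOKEN_MERGES)}.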
# Speech2Text2 has no max input length
lowerCamelCase : Optional[int] = {'''facebook/s2t-wav2vec2-large-en-de''': 10_24}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Dict = VOCAB_FILES_NAMES
_A : str = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : int , __a : Optional[Any] , __a : List[Any]="<s>" , __a : str="<pad>" , __a : List[Any]="</s>" , __a : Dict="<unk>" , __a : Any=False , __a : Union[str, Any]=None , **__a : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
unk_token=__a , bos_token=__a , eos_token=__a , pad_token=__a , do_lower_case=__a , **__a , )
__lowercase : int = do_lower_case
with open(__a , encoding="""utf-8""" ) as vocab_handle:
__lowercase : List[Any] = json.load(__a )
__lowercase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
__lowercase : int = None
__lowercase : Union[str, Any] = None
else:
with open(__a , encoding="""utf-8""" ) as merges_handle:
__lowercase : List[str] = merges_handle.read().split("""\n""" )[:-1]
__lowercase : Union[str, Any] = [tuple(merge.split()[:2] ) for merge in merges]
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Union[str, Any] = {}
@property
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return len(self.decoder )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
__lowercase : Dict = get_pairs(__a )
if not pairs:
return token
while True:
__lowercase : Any = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase : Any = bigram
__lowercase : Dict = []
__lowercase : List[str] = 0
while i < len(__a ):
try:
__lowercase : Tuple = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase : str = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase : Dict = tuple(__a )
__lowercase : List[str] = new_word
if len(__a ) == 1:
break
else:
__lowercase : str = get_pairs(__a )
__lowercase : List[str] = """ """.join(__a )
if word == "\n " + BPE_TOKEN_MERGES:
__lowercase : Tuple = """\n""" + BPE_TOKEN_MERGES
if word.endswith(__a ):
__lowercase : List[Any] = word.replace(__a , """""" )
__lowercase : Union[str, Any] = word.replace(""" """ , __a )
__lowercase : Any = word
return word
def lowerCAmelCase ( self : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
__lowercase : Union[str, Any] = text.lower()
__lowercase : Union[str, Any] = text.split()
__lowercase : Optional[Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__a ).split(""" """ ) ) )
return split_tokens
def lowerCAmelCase ( self : Dict , __a : str ) -> int:
"""simple docstring"""
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Tuple , __a : int ) -> str:
"""simple docstring"""
__lowercase : str = self.decoder.get(__a , self.unk_token )
return result
def lowerCAmelCase ( self : Union[str, Any] , __a : List[str] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = """ """.join(__a )
# make sure @@ tokens are concatenated
__lowercase : Union[str, Any] = """""".join(string.split(__a ) )
return string
def lowerCAmelCase ( self : int , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase : Tuple = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : Optional[Any] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + """\n""" )
__lowercase : Dict = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__a , """w""" , encoding="""utf-8""" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a : __a[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__lowercase : str = token_index
writer.write(""" """.join(__a ) + """\n""" )
index += 1
return (vocab_file, merges_file)
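def _bpe_merge_sketch():
    # A toy walk-through of the merge loop in `bpe` above, under a hypothetical
    # single-merge table: with ("l", "o") ranked 0, `min` over `bpe_ranks`
    # selects ("l", "o"), which would collapse "l o w" into "lo w".
    word = ("l", "o", "w")
    bpe_ranks = {("l", "o"): 0}
    pairs = get_pairs(word)
    bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
    assert bigram == ("l", "o")
    return bigram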
| 649
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
| 649
| 1
|
def snake_case_ ( lowerCAmelCase_ : int = 1000 ):
__lowercase : List[Any] = 3
__lowercase : Tuple = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # the `or` already counts multiples of 15 exactly once, so no
            # separate correction branch is needed
            result += a
a += 1
return result
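# Worked check: for n = 10 the multiples of 3 or 5 below n are 3, 5, 6 and 9,
# which sum to 23.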
if __name__ == "__main__":
print(f'''{solution() = }''')
| 649
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
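    # The pool caveat above in one line: `fork`ed workers inherit the parent's
    # memory at pool creation time, so the LM must already be loaded (i.e. the
    # processor must exist) before the pool is instantiated (a reading of the
    # comment above, stated as an assumption).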
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
| 649
| 1
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
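# Every guard above follows the same shape; a generic sketch (the extra's name
# and module paths are hypothetical):
#
#     try:
#         if not is_some_extra_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_some_extra_objects import *  # noqa F403  (stubs that raise on use)
#     else:
#         from .pipelines import SomeExtraPipeline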
| 649
|
def snake_case_ ( input_a : int , input_b : int ):
    # AND is 1 exactly when neither input is 0, i.e. the pair contains no zeros
    return int((input_a, input_b).count(0 ) == 0 )
def snake_case_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
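# --- Added illustration (not part of the original script) ---
# Minimal sketch of the retry behaviour `find_executable_batch_size` provides for
# `inner_training_loop` above: the decorated function is re-invoked with a halved
# `batch_size` whenever it raises a recognized out-of-memory error. The threshold
# of 16 below is a made-up stand-in for real GPU memory limits.
#
# @find_executable_batch_size(starting_batch_size=64)
# def _demo(batch_size):
#     if batch_size > 16:  # pretend anything larger does not fit in memory
#         raise RuntimeError("CUDA out of memory.")  # message the decorator retries on
#     print(f"succeeded with batch_size={batch_size}")
#
# _demo()  # called with no arguments; the decorator injects `batch_size`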
| 649
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
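# --- Added illustration (not part of the original module) ---
# Example of the YAML this command typically writes for a plain single-GPU setup.
# The keys mirror the questions asked by `get_cluster_input`; exact fields vary
# across accelerate versions, so treat this as a hedged sketch, not a schema.
#
# compute_environment: LOCAL_MACHINE
# distributed_type: 'NO'
# mixed_precision: fp16
# num_processes: 1
# use_cpu: false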
| 649
| 1
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
                 encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6,
                 decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0,
                 is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0,
                 auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50",
                 use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2,
                 mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2,
                 bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 12
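# --- Added usage sketch (not part of the original module) ---
# Exercises the aliases declared in `attribute_map` above; values are arbitrary.
#
# config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
# assert config.hidden_size == 256           # resolved to d_model
# assert config.num_attention_heads == 8     # resolved to encoder_attention_heads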
| 649
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every way the string `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
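# Added worked example (not part of the original module):
# all_construct("abc", ["a", "b", "c", "ab"]) returns [["ab", "c"], ["a", "b", "c"]]:
# table[0] seeds the empty decomposition, "a" and "ab" extend it from index 0,
# "b" extends ["a"] at index 1, and "c" completes both partial answers at index 2.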
| 649
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCamelCase : Optional[int] = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
'''simple docstring'''
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        """simple docstring"""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
'''simple docstring'''
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
        raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from local json files
        raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd
        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
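# --- Added usage sketch (not part of the original script) ---
# Hedged example of a typical invocation. `microsoft/tapex-base` is one checkpoint
# compatible with TapexTokenizer/BartForSequenceClassification; the flags are the
# standard HfArgumentParser ones defined by the dataclasses above:
#
#   python run_tabfact_with_tapex.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact --do_train --do_eval \
#     --output_dir /tmp/tabfact_out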
| 649
|
def or_gate(input_1: int, input_2: int) -> int:
    """Returns 1 when at least one input is 1 (i.e. the input tuple holds a nonzero entry)."""
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 649
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96],
                 num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2],
                 patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True],
                 attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12,
                 is_training=True, use_labels=True, num_labels=2):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels,
            embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv,
            depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        """simple docstring"""
        super().test_dataset_conversion()
    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        """simple docstring"""
        super().test_keras_fit()
    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        """simple docstring"""
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
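# Added worked example (not part of the original tests): the spatial-size
# recurrence in `create_and_check_model` is the standard conv formula
# out = floor((in + 2*pad - kernel) / stride) + 1. With image_size=64 and the
# stage-0 defaults above (kernel 7, stride 4, padding 2):
# floor((64 + 2*2 - 7) / 4) + 1 = floor(61 / 4) + 1 = 16.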
| 649
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
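# Added note (not part of the original module): with this pattern,
# `import transformers.models.funnel` stays cheap. For example,
# `from transformers.models.funnel import FunnelModel` only pays for the heavy
# torch import at attribute-access time, when `_LazyModule.__getattr__` resolves
# the name through `_import_structure`.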
| 649
| 1
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    '''simple docstring'''
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        """simple docstring"""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        """simple docstring"""
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        """simple docstring"""
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        """simple docstring"""
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        """simple docstring"""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class GenerationConfigPushToHubTester(unittest.TestCase):
    '''simple docstring'''
    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        """simple docstring"""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        """simple docstring"""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
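# Added mini-example (not part of the original tests): the save/load round trip
# the tests above exercise, reduced to its core. The path is illustrative.
#
# config = GenerationConfig(do_sample=True, temperature=0.7)
# config.save_pretrained("/tmp/gen-config")
# assert GenerationConfig.from_pretrained("/tmp/gen-config").temperature == 0.7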
| 649
|
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    '''simple docstring'''
    def __init__(self, lock_file):
        """simple docstring"""
        self.lock_file = lock_file
        return None
    def __str__(self):
        """simple docstring"""
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    '''simple docstring'''
    def __init__(self, lock):
        """simple docstring"""
        self.lock = lock
        return None
    def __enter__(self):
        """simple docstring"""
        return self.lock
    def __exit__(self, exc_type, exc_value, traceback):
        """simple docstring"""
        self.lock.release()
        return None
class BaseFileLock:
    '''simple docstring'''
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """simple docstring"""
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file ( self ) -> Optional[int]:
        """simple docstring"""
        return self._lock_file
    @property
    def timeout ( self ) -> Optional[Any]:
        """simple docstring"""
        return self._timeout
    @timeout.setter
    def timeout ( self , value ) -> Dict:
        """simple docstring"""
        self._timeout = float(value )
        return None
    def _acquire ( self ) -> Tuple:
        """simple docstring"""
        raise NotImplementedError()
    def _release ( self ) -> Any:
        """simple docstring"""
        raise NotImplementedError()
    @property
    def is_locked ( self ) -> List[Any]:
        """simple docstring"""
        return self._lock_file_fd is not None
    def acquire ( self , timeout=None , poll_intervall=0.05 ) -> List[str]:
        """simple docstring"""
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
                    time.sleep(poll_intervall )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
    def release ( self , force=False ) -> Optional[Any]:
        """simple docstring"""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F"Lock {lock_id} released on {lock_filename}" )
        return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
    def __exit__( self , exc_type , exc_value , traceback ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        self.release(force=True )
return None
    def hash_filename_if_too_long ( self , path : str , max_length : int ) -> str:
        """simple docstring"""
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + """...""" + hashed_filename + """.lock"""
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock ( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> List[Any]:
        """simple docstring"""
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
    def _acquire ( self ) -> Union[str, Any]:
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release ( self ) -> List[Any]:
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock ( BaseFileLock ):
    '''simple docstring'''
    def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> Any:
        """simple docstring"""
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire ( self ) -> Dict:
        """simple docstring"""
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release ( self ) -> Optional[int]:
        """simple docstring"""
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
return None
class SoftFileLock ( BaseFileLock ):
    '''simple docstring'''
    def _acquire ( self ) -> Union[str, Any]:
        """simple docstring"""
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release ( self ) -> Tuple:
        """simple docstring"""
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn('''only soft file lock is available''')
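# Minimal usage sketch for the API above (the path is an illustrative example):
# `FileLock` resolves to the platform-appropriate class, `acquire()` returns a
# proxy usable as a context manager, and failure to acquire within `timeout`
# raises `Timeout`.
#
#   lock = FileLock("resource.txt.lock", timeout=5)
#   try:
#       with lock.acquire(poll_intervall=0.1):
#           pass  # exclusive access to the guarded resource goes here
#   except Timeout:
#       pass  # another process is holding the lock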
| 649
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache ( Generic[T] ):
    '''simple docstring'''
    dq_store : deque[T] # Cache store of keys
    key_reference : set[T] # References of the keys in cache
    _MAX_CAPACITY : int = 10 # Maximum capacity of cache
    def __init__( self , n : int ) -> None:
        """simple docstring"""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""" )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer ( self , x : T ) -> None:
        """simple docstring"""
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display ( self ) -> None:
        """simple docstring"""
        for k in self.dq_store:
            print(k )
def __repr__( self : Optional[Any] ) -> str:
"""simple docstring"""
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
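    # Why the assertion above holds: `refer` appends at the left end of the deque
    # and evicts at the right, and re-referring an existing key ('A') moves it
    # back to the front. With capacity 4, refer(5) evicts the least recently used
    # key (2), leaving [5, 4, 'A', 3].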
| 649
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''layoutlmv3'''
    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_ad_position_embeddings=1024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_ad_pos_bins=64 , max_rel_ad_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ) -> List[str]:
        """simple docstring"""
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.12''' )
@property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
    def atol_for_validation ( self ) -> float:
"""simple docstring"""
return 1E-5
@property
    def default_onnx_opset ( self ) -> int:
"""simple docstring"""
return 12
    def generate_dummy_inputs ( self , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        """simple docstring"""
        setattr(processor.image_processor , """apply_ocr""" , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_pixel_values = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_pixel_values , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
return inputs
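# Hedged usage sketch (the checkpoint name is an example assumption): with a
# real LayoutLMv3 processor, the method above produces ONNX-export dummy inputs
# roughly like this:
#
#   from transformers import LayoutLMv3Processor
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=16)
#   # `dummy` then holds "input_ids", "bbox", "attention_mask" and "pixel_values"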
| 649
| 1
|
def move_tower ( height : int , from_pole : str , to_pole : str , with_pole : str ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk ( from_pole : str , to_pole : str ):
    print("""moving disk from""" , from_pole , """to""" , to_pole )
def main ( ):
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
    main()
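# Expected output for a height of 2 (follows directly from the recursion above):
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B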
| 649
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
'''simple docstring'''
    def __init__( self , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs=None , generated_responses=None ) -> None:
        """simple docstring"""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid : uuid.UUID = conversation_id
        self.past_user_inputs : List[str] = past_user_inputs
        self.generated_responses : List[str] = generated_responses
        self.new_user_input : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
        if not isinstance(__a , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input ( self , text : str , overwrite : bool = False ) -> Dict:
        """simple docstring"""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    F"with: \"{text}\"." )
                self.new_user_input = text
            else:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
        else:
            self.new_user_input = text
    def mark_processed ( self ) -> Optional[Any]:
        """simple docstring"""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response ( self , response : str ) -> List[Any]:
        """simple docstring"""
        self.generated_responses.append(response )
    def iter_texts ( self ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( Pipeline ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters ( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ) -> Optional[int]:
        """simple docstring"""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["""min_length_for_response"""] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["""minimum_tokens"""] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["""max_length"""] = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations : Union[Conversation, List[Conversation]] , num_workers=0 , **kwargs ) -> Tuple:
        """simple docstring"""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess ( self , conversation : Conversation , min_length_for_response=32 ) -> Dict[str, Any]:
        """simple docstring"""
        if not isinstance(conversation , Conversation ):
            raise ValueError("""ConversationalPipeline expects a Conversation as input""" )
        if conversation.new_user_input is None:
            raise ValueError(
                F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
                """Add user inputs with the conversation's `add_user_input` method""" )
        if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward ( self , model_inputs , minimum_tokens=10 , **generate_kwargs ) -> Optional[Any]:
        """simple docstring"""
        max_length = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        n = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
            trim = max_length - minimum_tokens
            model_inputs["""input_ids"""] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["""attention_mask"""] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop("""conversation""" )
        generate_kwargs["""max_length"""] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess ( self , model_outputs , clean_up_tokenization_spaces=True ) -> List[str]:
        """simple docstring"""
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize ( self , conversation : Conversation ) -> Dict:
        """simple docstring"""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
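# Hedged usage sketch for the pipeline above (the checkpoint is an example
# assumption):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Hi there, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])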
| 649
| 1
|
def or_gate ( input_a : int , input_b : int ):
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate ( ):
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 649
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
'''simple docstring'''
    def create_and_test_config_common_properties ( self ) -> Optional[int]:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs ( self ) -> Optional[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config ( self ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model ( self , config , pixel_values , labels , pixel_labels ) -> List[Any]:
        """simple docstring"""
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification ( self , config , pixel_values , labels , pixel_labels ) -> Tuple:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation ( self , config , pixel_values , labels , pixel_labels ) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common ( self ) -> Optional[int]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp ( self ) -> List[Any]:
        """simple docstring"""
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor ( self ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 649
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ) -> int:
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps ( self , num_inference_steps , device : Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred ( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute the reverse-time SDE drift and diffusion (an Euler-Maruyama step)
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
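# Hedged usage sketch: in diffusers this scheduler corresponds to
# `ScoreSdeVpScheduler`; a reverse-diffusion sampling loop over the methods
# above would look roughly like this (`model` is a placeholder score network):
#
#   scheduler.set_timesteps(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       score = model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t, generator=generator)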
| 649
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm ( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # render the bar only on the local main process; disable it on other ranks
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
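# Usage sketch: under a multi-process `accelerate` launch, only the local main
# process renders the progress bar; the other ranks receive a disabled
# `tqdm` instance.
#
#   for batch in tqdm(True, range(100)):
#       pass  # per-step work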
| 649
| 1
|
import os
def solution ( ):
    """Return the greatest product of four adjacent numbers in the 20x20 grid,
    scanning rightward, downward, and along both diagonals."""
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = [] # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
    print(solution())
| 649
|
from __future__ import annotations
def maximum_non_adjacent_sum ( nums : list[int] ):
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding),
        )
    return max(max_including , max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
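    # Worked example: for [1, 2, 3] the recurrence keeps the best sums with and
    # without the current element, so the answer is max(1 + 3, 2) = 4 --
    # adjacent elements are never combined.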
| 649
| 1
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main ( ):
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 649
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp ( self ) -> List[str]:
        """simple docstring"""
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = """<s>"""
__lowercase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 1000 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def lowerCAmelCase ( self , max_length=15 ) -> int:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = """This is a simple input"""
                s2 = ["""This is a simple input 1""", """This is a simple input 2"""]
                p = ("""This is a simple input""", """This is a pair""")
                p2 = [
                    ("""This is a simple input 1""", """This is a simple input 2"""),
                    ("""This is a simple pair 1""", """This is a simple pair 2"""),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="""max_length""" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="""max_length""" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="""max_length""" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="""max_length""" , )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer ( self ) -> Tuple:
"""simple docstring"""
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = """Hello World!"""
__lowercase : Optional[int] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = """ """.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="""pt""" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["""input_ids"""].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : int = {"""input_ids""": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""google/reformer-crime-and-punishment""" , revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" , padding=False , sequences=sequences , )
| 649
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys ( config , has_lm_head=False , is_semantic=False ):
    prefix = """backbone.""" if is_semantic else """"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False ):
for i in range(config.num_hidden_layers ):
__lowercase : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__lowercase : List[str] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : Union[str, Any] = q_bias
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__lowercase : str = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
__lowercase : List[str] = gamma_a
__lowercase : Optional[int] = gamma_a
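# Hedged sketch of the split performed above: the checkpoint stores a fused
# (3 * hidden_size, hidden_size) qkv projection, and the loop cuts it into three
# equal row blocks (query, key, value). Sizes below are illustrative only.
def _sketch_qkv_split(hidden_size=4):
    import torch

    qkv = torch.randn(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : 2 * hidden_size, :]
    v = qkv[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)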
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Tuple = dct.pop(lowerCAmelCase_ )
__lowercase : Tuple = val
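# Hedged toy example of the helper above: each (src, dest) pair produced by
# create_rename_keys is applied by popping the old key and re-inserting the value
# under the new one. The single pair below is just for illustration.
def _sketch_apply_renames():
    toy_state = {"blocks.0.norm1.weight": 0}
    for src, dest in [("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")]:
        toy_state[dest] = toy_state.pop(src)
    assert list(toy_state) == ["beit.encoder.layer.0.layernorm_before.weight"]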
def snake_case_ ( ):
__lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : Any = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
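# Hedged invocation sketch (the script file name is assumed; the URL is this
# script's documented default, and the output folder is a placeholder):
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base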
| 649
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = '''unispeech'''
def __init__( self : int , __a : Dict=32 , __a : Tuple=768 , __a : Optional[Any]=12 , __a : Tuple=12 , __a : List[str]=3072 , __a : str="gelu" , __a : Any=0.1 , __a : Dict=0.1 , __a : Any=0.1 , __a : Optional[Any]=0.0 , __a : int=0.0 , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.02 , __a : int=1E-5 , __a : Union[str, Any]="group" , __a : Union[str, Any]="gelu" , __a : int=(512, 512, 512, 512, 512, 512, 512) , __a : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __a : int=(10, 3, 3, 3, 3, 2, 2) , __a : Any=False , __a : int=128 , __a : int=16 , __a : Optional[int]=False , __a : Tuple=True , __a : List[str]=0.05 , __a : Optional[int]=10 , __a : List[Any]=2 , __a : List[Any]=0.0 , __a : Any=10 , __a : List[Any]=0 , __a : Optional[Any]=320 , __a : Tuple=2 , __a : Optional[int]=0.1 , __a : List[str]=100 , __a : int=256 , __a : Optional[Any]=256 , __a : Dict=0.1 , __a : Optional[int]="mean" , __a : str=False , __a : Optional[int]=False , __a : Union[str, Any]=256 , __a : Tuple=80 , __a : Union[str, Any]=0 , __a : Union[str, Any]=1 , __a : Any=2 , __a : int=0.5 , **__a : Tuple , ) -> List[Any]:
"""simple docstring"""
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
__lowercase : int = hidden_size
__lowercase : Tuple = feat_extract_norm
__lowercase : str = feat_extract_activation
__lowercase : Optional[Any] = list(__a )
__lowercase : List[str] = list(__a )
__lowercase : List[str] = list(__a )
__lowercase : str = conv_bias
__lowercase : Dict = num_conv_pos_embeddings
__lowercase : List[str] = num_conv_pos_embedding_groups
__lowercase : int = len(self.conv_dim )
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : Union[str, Any] = intermediate_size
__lowercase : Any = hidden_act
__lowercase : str = num_attention_heads
__lowercase : Optional[int] = hidden_dropout
__lowercase : List[str] = attention_dropout
__lowercase : Tuple = activation_dropout
__lowercase : List[Any] = feat_proj_dropout
__lowercase : int = final_dropout
__lowercase : Any = layerdrop
__lowercase : Dict = layer_norm_eps
__lowercase : Optional[Any] = initializer_range
__lowercase : Dict = num_ctc_classes
__lowercase : Optional[Any] = vocab_size
__lowercase : Optional[int] = do_stable_layer_norm
__lowercase : Optional[Any] = use_weighted_layer_sum
__lowercase : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase : int = apply_spec_augment
__lowercase : Optional[int] = mask_time_prob
__lowercase : Tuple = mask_time_length
__lowercase : Dict = mask_time_min_masks
__lowercase : Optional[int] = mask_feature_prob
__lowercase : Dict = mask_feature_length
__lowercase : Optional[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowercase : List[str] = num_codevectors_per_group
__lowercase : Union[str, Any] = num_codevector_groups
__lowercase : Optional[Any] = contrastive_logits_temperature
__lowercase : Optional[int] = feat_quantizer_dropout
__lowercase : List[Any] = num_negatives
__lowercase : Union[str, Any] = codevector_dim
__lowercase : str = proj_codevector_dim
__lowercase : List[Any] = diversity_loss_weight
# ctc loss
__lowercase : Tuple = ctc_loss_reduction
__lowercase : Optional[int] = ctc_zero_infinity
# pretraining loss
__lowercase : str = replace_prob
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
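# Hedged note on the property above: multiplying the conv strides gives the feature
# extractor's overall downsampling ratio (input samples per output frame). With the
# default strides (5, 2, 2, 2, 2, 2, 2) from the signature, the product is 5 * 2**6 = 320.
def _sketch_stride_product():
    import functools
    import operator

    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320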
| 649
|
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : int , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase : int = class_size
__lowercase : int = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowercase : str = nn.Linear(__a , __a )
def lowerCAmelCase ( self : Tuple , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.mlp(__a )
return logits
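# Hedged standalone equivalent of the head above: a single linear map from the
# embedding space to class logits (the sizes below are arbitrary examples).
def _sketch_linear_head():
    import torch
    from torch import nn

    head = nn.Linear(8, 3)  # embed_size -> class_size
    logits = head(torch.randn(2, 8))
    assert logits.shape == (2, 3)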
| 649
| 1
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = ProphetNetTokenizer
_A : Optional[Any] = False
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
__lowercase : Any = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCAmelCase ( self : List[Any] , __a : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = """UNwant\u00E9d,running"""
__lowercase : List[str] = """unwanted, running"""
return input_text, output_text
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase : List[Any] = self.tokenizer_class(self.vocab_file )
__lowercase : Any = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Dict = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Tuple = BasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
__lowercase : Dict = BasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = BasicTokenizer(do_lower_case=__a , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__lowercase : Optional[Any] = {}
for i, token in enumerate(__a ):
__lowercase : Any = i
__lowercase : str = WordpieceTokenizer(vocab=__a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowercase : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowercase : List[str] = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__lowercase : str = tokenizer(__a , padding=__a , return_tensors="""pt""" )
self.assertIsInstance(__a , __a )
__lowercase : str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__lowercase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Tuple = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Dict = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
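# Hedged observation on the asserts above: ProphetNet's single-sequence format is
# `tokens + [SEP]` (token id 102) with no leading [CLS], and a pair is encoded as
# `A + [SEP] + B + [SEP]`.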
| 649
|
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : str ):
__lowercase : Tuple = [x.strip() for x in open(lowerCAmelCase_ ).readlines()]
__lowercase : Dict = [x.strip() for x in open(lowerCAmelCase_ ).readlines()][: len(lowerCAmelCase_ )]
__lowercase : Tuple = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
if save_path is not None:
save_json(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
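# Hedged usage note: fire exposes the function as a CLI, so the script can be run as
#   python rouge_cli.py predictions.txt references.txt --save_path=rouge.json
# (the script and file names here are placeholders).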
| 649
| 1
|
def snake_case_ ( lowerCAmelCase_ : int = 1000 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 649
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
download_parser.set_defaults(func=__a )
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 649
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case_ ( lowerCAmelCase_ : bytes , lowerCAmelCase_ : int ):
__lowercase : List[str] = F"{sampling_rate}"
__lowercase : List[str] = """1"""
__lowercase : Tuple = """f32le"""
__lowercase : Tuple = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
__lowercase : Union[str, Any] = ffmpeg_process.communicate(lowerCAmelCase_ )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
__lowercase : str = output_stream[0]
__lowercase : str = np.frombuffer(lowerCAmelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
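# Hedged design note on the reader above: decoding goes through ffmpeg's stdin/stdout
# pipes ("pipe:0" / "pipe:1"), so arbitrary container formats are converted to mono
# ("-ac 1") float32 PCM ("f32le") at the requested rate entirely in memory, with no
# temporary files.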
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : float , lowerCAmelCase_ : str = "f32le" , ):
__lowercase : List[Any] = F"{sampling_rate}"
__lowercase : Tuple = """1"""
if format_for_conversion == "s16le":
__lowercase : Optional[Any] = 2
elif format_for_conversion == "f32le":
__lowercase : Tuple = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
__lowercase : int = platform.system()
if system == "Linux":
__lowercase : Optional[Any] = """alsa"""
__lowercase : Optional[Any] = """default"""
elif system == "Darwin":
__lowercase : str = """avfoundation"""
__lowercase : Optional[int] = """:0"""
elif system == "Windows":
__lowercase : Tuple = """dshow"""
__lowercase : int = """default"""
__lowercase : str = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
__lowercase : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowercase : Optional[int] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ )
for item in iterator:
yield item
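# Hedged arithmetic check for chunk_len above: with sampling_rate=16000,
# chunk_length_s=1.0 and f32le samples (4 bytes each), every chunk read from the
# microphone stream is 16000 * 1 * 4 = 64000 bytes.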
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[Tuple[float, float], float]] = None , lowerCAmelCase_ : str = "f32le" , ):
if stream_chunk_s is not None:
__lowercase : Optional[int] = stream_chunk_s
else:
__lowercase : List[str] = chunk_length_s
__lowercase : List[str] = ffmpeg_microphone(lowerCAmelCase_ , lowerCAmelCase_ , format_for_conversion=lowerCAmelCase_ )
if format_for_conversion == "s16le":
__lowercase : Any = np.intaa
__lowercase : str = 2
elif format_for_conversion == "f32le":
__lowercase : Any = np.floataa
__lowercase : List[str] = 4
else:
raise ValueError(F"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
if stride_length_s is None:
__lowercase : Union[str, Any] = chunk_length_s / 6
__lowercase : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCAmelCase_ , (int, float) ):
__lowercase : Dict = [stride_length_s, stride_length_s]
__lowercase : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowercase : Optional[int] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowercase : Dict = datetime.datetime.now()
__lowercase : int = datetime.timedelta(seconds=lowerCAmelCase_ )
for item in chunk_bytes_iter(lowerCAmelCase_ , lowerCAmelCase_ , stride=(stride_left, stride_right) , stream=lowerCAmelCase_ ):
# Put everything back in numpy scale
__lowercase : str = np.frombuffer(item["""raw"""] , dtype=lowerCAmelCase_ )
__lowercase : int = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
__lowercase : List[str] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple[int, int] , lowerCAmelCase_ : bool = False ):
__lowercase : Optional[Any] = b""""""
__lowercase , __lowercase : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
__lowercase : Optional[int] = 0
for raw in iterator:
acc += raw
if stream and len(lowerCAmelCase_ ) < chunk_len:
__lowercase : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCAmelCase_ ) >= chunk_len:
# We are flushing the accumulator
__lowercase : Union[str, Any] = (_stride_left, stride_right)
__lowercase : Optional[int] = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
__lowercase : List[Any] = False
yield item
__lowercase : List[str] = stride_left
__lowercase : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCAmelCase_ ) > stride_left:
__lowercase : List[str] = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
__lowercase : List[Any] = False
yield item
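# Hedged worked example of the chunker above, traced by hand: with chunk_len=4 and
# stride=(1, 1), feeding b"0123456789" yields raw chunks b"0123" (stride (0, 1)),
# b"2345", b"4567", b"6789" (each stride (1, 1)), and a final partial b"89" with
# stride (1, 0) -- consecutive chunks overlap by stride_left + stride_right bytes.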
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Any = 2**24 # 16 MB
try:
with subprocess.Popen(lowerCAmelCase_ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase_ ) as ffmpeg_process:
while True:
__lowercase : List[str] = ffmpeg_process.stdout.read(lowerCAmelCase_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 649
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
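# Hedged note on the generation test above: `generate` returns sampled trajectories of
# shape (batch, num_parallel_samples, prediction_length), and averaging over the sample
# axis -- as done for `mean_prediction` -- yields the point forecast compared against
# the expected slice.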
| 649
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
__lowercase : Any = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
__lowercase : int = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
__lowercase : Optional[Any] = shift_tokens_right(__a , model.config.pad_token_id , model.config.decoder_start_token_id )
__lowercase : Dict = model(__a , decoder_input_ids=__a ).logits
__lowercase : Union[str, Any] = optax.softmax_cross_entropy(__a , onehot(__a , logits.shape[-1] ) ).mean()
__lowercase : Any = -(labels.shape[-1] * loss.item())
__lowercase : str = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
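# Hedged note on the score above: optax.softmax_cross_entropy returns per-token negative
# log-likelihoods whose mean is taken, so multiplying by the label length and negating
# recovers the (approximate) total log-likelihood that EXPECTED_SCORE encodes.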
| 649
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , __a : Tuple=2000 , __a : List[str]=0.1 , __a : str=20 , __a : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
__lowercase : int = None
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
__lowercase : List[str] = torch.linspace(1 , self.config.sampling_eps , __a , device=__a )
def lowerCAmelCase ( self : Tuple , __a : List[Any] , __a : Tuple , __a : int , __a : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowercase : Dict = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase : Optional[Any] = std.unsqueeze(-1 )
__lowercase : List[Any] = -score / std
# compute
__lowercase : Dict = -1.0 / len(self.timesteps )
__lowercase : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase : List[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase : Union[str, Any] = beta_t.unsqueeze(-1 )
__lowercase : List[str] = -0.5 * beta_t * x
__lowercase : int = torch.sqrt(__a )
__lowercase : Union[str, Any] = drift - diffusion**2 * score
__lowercase : Optional[Any] = x + drift * dt
# add noise
__lowercase : List[str] = randn_tensor(x.shape , layout=x.layout , generator=__a , device=x.device , dtype=x.dtype )
__lowercase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
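# Hedged reading of step_pred above: it performs one Euler-Maruyama step of the
# reverse-time variance-preserving SDE,
#   x_mean = x + (-0.5 * beta(t) * x - beta(t) * score) * dt
#   x      = x_mean + sqrt(beta(t)) * sqrt(-dt) * z,   z ~ N(0, I)
# with beta(t) = beta_min + t * (beta_max - beta_min) and dt = -1 / len(timesteps).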
| 649
| 1
|
from sklearn.metrics import recall_score
import datasets
lowerCamelCase : Optional[Any] = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
lowerCamelCase : Tuple = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
lowerCamelCase : Dict = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : int , __a : Dict=None , __a : List[Any]=1 , __a : Optional[Any]="binary" , __a : Optional[int]=None , __a : List[Any]="warn" , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = recall_score(
__a , __a , labels=__a , pos_label=__a , average=__a , sample_weight=__a , zero_division=__a , )
return {"recall": float(__a ) if score.size == 1 else score}
| 649
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
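        # the byte-level BPE encoder maps a space (byte 0x20) to "\u0120" ("Ġ"), the same marker used in the vocab entries above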
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
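                # expected offsets depend on trim_offsets: when True, reported spans exclude the
                # leading space; when False, the space is counted with the following token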
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
| 649
| 1
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. This contrasts with taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB
# in total for all files; that approach is used by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
lowerCamelCase : str = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
lowerCamelCase : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowerCamelCase : List[str] = dict(zip(vocab, range(len(vocab))))
lowerCamelCase : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase : int = Path(tmpdirname)
lowerCamelCase : Any = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
lowerCamelCase : str = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
lowerCamelCase : Union[str, Any] = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
lowerCamelCase : Union[str, Any] = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
lowerCamelCase : List[str] = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
lowerCamelCase : int = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
lowerCamelCase : int = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
lowerCamelCase : int = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 649
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
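                # saved_model=True also exports a TF SavedModel under "<save_dir>/saved_model/1", which is reloaded below with tf.keras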
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
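            # ConvBERT's mixed attention keeps only half of the heads for self-attention (default head_ratio=2), hence num_attention_heads / 2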
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 649
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 649
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
| 649
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = '''gpt_neox'''
def __init__( self : Tuple , __a : str=50432 , __a : List[str]=6144 , __a : Dict=44 , __a : Optional[Any]=64 , __a : Optional[int]=24576 , __a : Optional[Any]="gelu" , __a : int=0.25 , __a : Any=10000 , __a : int=0.0 , __a : int=0.0 , __a : Union[str, Any]=0.1 , __a : str=2048 , __a : str=0.02 , __a : List[str]=1E-5 , __a : Union[str, Any]=True , __a : Any=0 , __a : Any=2 , __a : Optional[int]=False , __a : str=True , __a : Union[str, Any]=None , **__a : Dict , ) -> Any:
"""simple docstring"""
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
__lowercase : List[Any] = vocab_size
__lowercase : Any = max_position_embeddings
__lowercase : List[Any] = hidden_size
__lowercase : Optional[int] = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : List[str] = intermediate_size
__lowercase : List[Any] = hidden_act
__lowercase : Any = rotary_pct
__lowercase : Tuple = rotary_emb_base
__lowercase : str = attention_dropout
__lowercase : Any = hidden_dropout
__lowercase : List[Any] = classifier_dropout
__lowercase : List[Any] = initializer_range
__lowercase : Tuple = layer_norm_eps
__lowercase : Any = use_cache
__lowercase : Dict = tie_word_embeddings
__lowercase : Optional[int] = use_parallel_residual
__lowercase : Any = rope_scaling
self._rope_scaling_validation()
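        # e.g. the 20B defaults above (hidden_size=6144, num_attention_heads=64) give a per-head dimension of 6144 / 64 = 96, so the check below passes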
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisible by the number of attention heads! Make sure to update them!""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
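        # expected format, as validated below: e.g. rope_scaling={"type": "linear", "factor": 2.0}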
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F"got {self.rope_scaling}" )
__lowercase : Union[str, Any] = self.rope_scaling.get("""type""" , __a )
__lowercase : str = self.rope_scaling.get("""factor""" , __a )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 649
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure that an error is thrown when the decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
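        # inputs_to_logits_ratio is the number of audio samples per logit frame; dividing by the sampling rate gives seconds per frame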
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
| 649
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : Dict = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
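    # _LazyModule defers the heavy model imports above until the corresponding attributes are first accessed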
lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 649
|
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return int((input_a, input_a).count(0 ) == 0 )
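# the gate above returns 1 exactly when the input pair contains no 0, i.e. when both inputs are 1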
def snake_case_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 1
|
def snake_case_ ( lowerCAmelCase_ : int ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
__lowercase : List[str] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__lowercase : Tuple = 1
if upper_limit > 0:
__lowercase : Dict = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCAmelCase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
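# example: the Catalan sequence starts 1, 1, 2, 5, 14, 42, ...
# so catalan_numbers(5) returns [1, 1, 2, 5, 14, 42]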
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
lowerCamelCase : Dict = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 649
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase : int = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def snake_case_ ( ):
__lowercase : List[Any] = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase : Union[str, Any] = get_sagemaker_input()
else:
__lowercase : str = get_cluster_input()
return config
def snake_case_ ( lowerCAmelCase_ : List[str]=None ):
if subparsers is not None:
__lowercase : Optional[int] = subparsers.add_parser("""config""" , description=lowerCAmelCase_ )
else:
__lowercase : List[str] = argparse.ArgumentParser("""Accelerate config command""" , description=lowerCAmelCase_ )
parser.add_argument(
"""--config_file""" , default=lowerCAmelCase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def snake_case_ ( lowerCAmelCase_ : Tuple ):
__lowercase : Union[str, Any] = get_user_input()
if args.config_file is not None:
__lowercase : List[Any] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
__lowercase : Any = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(F"accelerate configuration saved at {config_file}" )
def snake_case_ ( ):
__lowercase : str = config_command_parser()
__lowercase : str = parser.parse_args()
config_command(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 649
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowerCamelCase : Union[str, Any] = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Tuple = '''facebook/nllb-200-distilled-600M'''
_A : List[Any] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
    '''which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
_A : Tuple = '''translator'''
_A : str = AutoTokenizer
_A : Tuple = AutoModelForSeqaSeqLM
_A : Optional[int] = LANGUAGE_CODES
_A : str = ['''text''', '''text''', '''text''']
_A : Any = ['''text''']
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] , __a : Dict ) -> Dict:
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"{tgt_lang} is not a supported language." )
__lowercase : List[str] = self.lang_to_code[src_lang]
__lowercase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__a , return_tensors="""pt""" , src_lang=__a , tgt_lang=__a )
def lowerCAmelCase ( self : List[Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
return self.model.generate(**__a )
def lowerCAmelCase ( self : Dict , __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__a )
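# Hedged usage sketch: assuming the tool class above is exposed under its
# upstream name (TranslationTool) and is callable like other PipelineTools,
# usage would look roughly like:
#
#     translator = TranslationTool()
#     translator("This is a super nice API!", src_lang="English", tgt_lang="French")
#
# The encode step maps the plain-English names through LANGUAGE_CODES to NLLB
# codes (e.g. "French" -> "fra_Latn") before building the translation inputs.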
| 649
|
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : list[str] | None = None ):
__lowercase : Tuple = word_bank or []
# create a table
__lowercase : int = len(lowerCAmelCase_ ) + 1
__lowercase : list[list[list[str]]] = []
for _ in range(lowerCAmelCase_ ):
table.append([] )
# seed value
__lowercase : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCAmelCase_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase_ )] == word:
__lowercase : list[list[str]] = [
[word, *way] for way in table[i]
]
                    # add the word to every combination the current position holds
                    # and push each new combination to table[i + len(word)]
table[i + len(lowerCAmelCase_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase_ )]:
combination.reverse()
return table[len(lowerCAmelCase_ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
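# A quick hand-check of the dynamic programming above (hedged; reuses the
# demo's public name): all_construct("hello", ["he", "l", "o"]) seeds
# table[2] = [["he"]], extends it letter by letter through table[3] and
# table[4], and ends with table[5] = [["he", "l", "l", "o"]] after the
# final reverse.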
| 649
| 1
|
class lowerCAmelCase ( __a ):
'''simple docstring'''
pass
class lowerCAmelCase ( __a ):
'''simple docstring'''
pass
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = [
[],
[],
[],
]
def lowerCAmelCase ( self : str , __a : int , __a : int ) -> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError("""Maximum queue size is 100""" )
self.queues[priority].append(__a )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self : Any ) -> str:
"""simple docstring"""
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Dict = []
def lowerCAmelCase ( self : Optional[int] , __a : int ) -> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(__a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
__lowercase : Optional[int] = min(self.queue )
self.queue.remove(__a )
return data
def __str__( self : Any ) -> str:
"""simple docstring"""
return str(self.queue )
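# Hedged expectation for the demos below: the fixed-priority queue drains
# strictly by priority band, FIFO within a band (priority 0 first: 10, 100,
# 128, then 70, 7, ...), while the element queue always pops the current
# minimum (1, 4, 5, 7, 10, ...).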
def snake_case_ ( ):
__lowercase : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(lowerCAmelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCAmelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def snake_case_ ( ):
__lowercase : Any = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowerCAmelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCAmelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 649
|
def or_gate ( input_a : int , input_b : int ):
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 649
| 1
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
download_parser.set_defaults(func=__a )
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 649
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
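# Hedged note: this is the standard transformers lazy-import pattern. The
# module object in sys.modules is replaced by a _LazyModule, so the torch/tf
# symbols listed in _import_structure are only imported on first attribute
# access (the TYPE_CHECKING branch gives static analyzers the real imports).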
| 649
| 1
|
from collections.abc import Generator
from math import sin
def snake_case_ ( lowerCAmelCase_ : bytes ):
if len(lowerCAmelCase_ ) != 32:
raise ValueError("""Input must be of length 32""" )
__lowercase : Union[str, Any] = b""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def snake_case_ ( lowerCAmelCase_ : int ):
if i < 0:
raise ValueError("""Input must be non-negative""" )
__lowercase : List[str] = format(lowerCAmelCase_ , """08x""" )[-8:]
__lowercase : Optional[int] = b""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" )
return little_endian_hex
def snake_case_ ( lowerCAmelCase_ : bytes ):
__lowercase : Any = b""""""
for char in message:
        bit_string += format(char , """08b""" ).encode("""utf-8""" )
__lowercase : int = format(len(lowerCAmelCase_ ) , """064b""" ).encode("""utf-8""" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(lowerCAmelCase_ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def snake_case_ ( lowerCAmelCase_ : bytes ):
if len(lowerCAmelCase_ ) % 512 != 0:
raise ValueError("""Input must have length that's a multiple of 512""" )
for pos in range(0 , len(lowerCAmelCase_ ) , 512 ):
__lowercase : List[str] = bit_string[pos : pos + 512]
__lowercase : Dict = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def snake_case_ ( lowerCAmelCase_ : int ):
if i < 0:
raise ValueError("""Input must be non-negative""" )
__lowercase : Any = format(lowerCAmelCase_ , """032b""" )
__lowercase : Optional[Any] = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(lowerCAmelCase_ , 2 )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return (a + b) % 2**32
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
if i < 0:
raise ValueError("""Input must be non-negative""" )
if shift < 0:
raise ValueError("""Shift must be non-negative""" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def snake_case_ ( lowerCAmelCase_ : bytes ):
__lowercase : List[str] = preprocess(lowerCAmelCase_ )
__lowercase : int = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__lowercase : int = 0x6745_2301
__lowercase : Dict = 0xefcd_ab89
__lowercase : str = 0x98ba_dcfe
__lowercase : Dict = 0x1032_5476
    __lowercase : Dict = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(lowerCAmelCase_ ):
__lowercase : Union[str, Any] = aa
__lowercase : int = ba
__lowercase : Optional[Any] = ca
__lowercase : Optional[Any] = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__lowercase : str = d ^ (b & (c ^ d))
__lowercase : Any = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__lowercase : str = c ^ (d & (b ^ c))
__lowercase : Dict = (5 * i + 1) % 16
elif i <= 47:
__lowercase : List[Any] = b ^ c ^ d
__lowercase : Tuple = (3 * i + 5) % 16
else:
__lowercase : int = c ^ (b | not_aa(lowerCAmelCase_ ))
__lowercase : List[Any] = (7 * i) % 16
__lowercase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32
__lowercase : List[Any] = d
__lowercase : str = c
__lowercase : Dict = b
__lowercase : int = sum_aa(lowerCAmelCase_ , left_rotate_aa(lowerCAmelCase_ , shift_amounts[i] ) )
# Add hashed chunk to running total
__lowercase : List[str] = sum_aa(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Any = sum_aa(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Union[str, Any] = sum_aa(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : List[Any] = sum_aa(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = reformat_hex(lowerCAmelCase_ ) + reformat_hex(lowerCAmelCase_ ) + reformat_hex(lowerCAmelCase_ ) + reformat_hex(lowerCAmelCase_ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
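    # Hedged sanity check: the docstrings were stripped in this copy, so the
    # doctest run above has nothing to verify. With the top-level digest
    # function restored to its upstream name (md5_me), one would expect
    #     md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
    # the well-known MD5 digest of the empty string.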
| 649
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
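# Hedged usage sketch (the aliasing above mirrors the upstream `filelock`
# package, where the name bound just before is exported as FileLock):
#
#     lock = FileLock("high_ground.txt.lock")
#     with lock:
#         with open("high_ground.txt", "a") as f:
#             f.write("You were the chosen one.")
#
# The context manager maps onto acquire()/release(); nested `with` blocks on
# the same object only release the OS-level lock once the counter hits zero.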
| 649
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Dict = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__lowercase : Tuple = k.replace(lowerCAmelCase_ , lowerCAmelCase_ )
if k.startswith("""encoder""" ):
__lowercase : int = k.replace(""".attn""" , """.self_attn""" )
__lowercase : Optional[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__lowercase : Union[str, Any] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__lowercase : Dict = k.replace("""norm1""" , """self_attn_layer_norm""" )
__lowercase : Dict = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__lowercase : str = k.replace("""norm3""" , """final_layer_norm""" )
return k
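# Worked example (hedged; follows the PATTERNS table above): a ParlAI key like
#     "encoder.layers.0.attention.q_lin.weight"
# first becomes "encoder.layers.0.attn.q_proj.weight" via the pattern
# replacements, then the encoder branch rewrites ".attn" to ".self_attn",
# yielding "encoder.layers.0.self_attn.q_proj.weight".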
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
__lowercase : Union[str, Any] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__lowercase : List[str] = sd.pop(lowerCAmelCase_ )
__lowercase : Optional[int] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__lowercase : Optional[Any] = v
lowerCamelCase : Optional[int] = ['''START''']
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ):
__lowercase : List[Any] = torch.load(lowerCAmelCase_ , map_location="""cpu""" )
__lowercase : Union[str, Any] = model["""model"""]
__lowercase : Any = BlenderbotConfig.from_json_file(lowerCAmelCase_ )
__lowercase : Union[str, Any] = BlenderbotForConditionalGeneration(lowerCAmelCase_ )
__lowercase : Any = m.model.state_dict().keys()
__lowercase : List[Any] = []
__lowercase : Tuple = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__lowercase : Optional[int] = rename_state_dict_key(lowerCAmelCase_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__lowercase : int = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowerCAmelCase_ )
m.model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
m.half()
m.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowerCamelCase : int = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 649
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
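# Hedged usage note: an ONNX export path would call generate_dummy_inputs (the
# last method above) with a LayoutLMv3 processor; apply_ocr is switched off via
# setattr so the supplied words/boxes are used, and the returned dict carries
# input_ids, bbox, attention_mask and pixel_values shaped for tracing. The
# exact caller depends on the transformers.onnx tooling, which is an assumption.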
| 649
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[int] , __a : Any=2 , __a : Any=3 , __a : List[str]=4 , __a : Optional[Any]=2 , __a : Optional[Any]=7 , __a : int=True , __a : Dict=True , __a : Dict=True , __a : List[Any]=True , __a : int=99 , __a : Optional[int]=36 , __a : Dict=3 , __a : Union[str, Any]=4 , __a : Dict=37 , __a : List[str]="gelu" , __a : List[str]=0.1 , __a : int=0.1 , __a : Union[str, Any]=512 , __a : Optional[Any]=16 , __a : Any=2 , __a : Optional[Any]=0.02 , __a : List[Any]=6 , __a : Optional[Any]=6 , __a : List[Any]=3 , __a : List[str]=4 , __a : Dict=None , __a : Dict=1000 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = parent
__lowercase : Tuple = batch_size
__lowercase : Tuple = num_channels
__lowercase : Union[str, Any] = image_size
__lowercase : Optional[Any] = patch_size
__lowercase : Optional[Any] = text_seq_length
__lowercase : Tuple = is_training
__lowercase : Any = use_input_mask
__lowercase : str = use_token_type_ids
__lowercase : Union[str, Any] = use_labels
__lowercase : str = vocab_size
__lowercase : Tuple = hidden_size
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : int = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : List[Any] = hidden_dropout_prob
__lowercase : Any = attention_probs_dropout_prob
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : Any = type_vocab_size
__lowercase : Any = type_sequence_label_size
__lowercase : Tuple = initializer_range
__lowercase : Tuple = coordinate_size
__lowercase : List[str] = shape_size
__lowercase : Tuple = num_labels
__lowercase : int = num_choices
__lowercase : Dict = scope
__lowercase : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowercase : int = text_seq_length
__lowercase : List[str] = (image_size // patch_size) ** 2 + 1
__lowercase : Any = self.text_seq_length + self.image_seq_length
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__lowercase : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__lowercase : List[Any] = bbox[i, j, 3]
__lowercase : str = bbox[i, j, 1]
__lowercase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__lowercase : Union[str, Any] = bbox[i, j, 2]
__lowercase : List[str] = bbox[i, j, 0]
__lowercase : int = t
__lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[str] = None
if self.use_input_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__lowercase : int = None
if self.use_token_type_ids:
__lowercase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__lowercase : Optional[int] = None
__lowercase : Any = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__lowercase : Optional[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : Optional[Any] , __a : List[Any] , __a : str , __a : Optional[Any] , __a : str , __a : List[str] , __a : Any , __a : Dict , __a : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = LayoutLMvaModel(config=__a )
model.to(__a )
model.eval()
# text + image
__lowercase : Dict = model(__a , pixel_values=__a )
__lowercase : int = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a )
__lowercase : List[Any] = model(__a , bbox=__a , pixel_values=__a , token_type_ids=__a )
__lowercase : Union[str, Any] = model(__a , bbox=__a , pixel_values=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__lowercase : Tuple = model(pixel_values=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : Union[str, Any] , __a : List[str] , __a : List[str] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = self.num_labels
__lowercase : str = LayoutLMvaForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : str = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __a : Optional[Any] , __a : str , __a : List[str] , __a : str , __a : Optional[Any] , __a : Optional[Any] , __a : Optional[int] , __a : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.num_labels
__lowercase : Optional[int] = LayoutLMvaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __a : List[str] , __a : Dict , __a : int , __a : Tuple , __a : List[str] , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = LayoutLMvaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
        (
            __lowercase ,
            __lowercase ,
            __lowercase ,
            __lowercase ,
            __lowercase ,
            __lowercase ,
            __lowercase ,
            __lowercase ,
        ) = config_and_inputs
__lowercase : Optional[int] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = False
_A : List[Any] = False
_A : Tuple = False
_A : Union[str, Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_A : Union[str, Any] = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Optional[Any] , __a : List[Any] , __a : List[Any] , __a : str ) -> List[Any]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = LayoutLMvaModelTester(self )
__lowercase : int = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : int , __a : List[str] , __a : int , __a : Optional[Any]=False ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = copy.deepcopy(__a )
if model_class in get_values(__a ):
__lowercase : str = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__a , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a ):
__lowercase : Any = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__a )
elif model_class in get_values(__a ):
__lowercase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
__lowercase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
elif model_class in [
*get_values(__a ),
]:
__lowercase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
elif model_class in [
*get_values(__a ),
]:
__lowercase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__a , )
return inputs_dict
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Optional[Any] = type
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
@slow
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[Any] = LayoutLMvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : Dict = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__a )
__lowercase : Tuple = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).pixel_values.to(__a )
__lowercase : Optional[Any] = torch.tensor([[1, 2]] )
__lowercase : int = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__lowercase : Tuple = model(
input_ids=input_ids.to(__a ) , bbox=bbox.to(__a ) , pixel_values=pixel_values.to(__a ) , )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
__lowercase : Optional[Any] = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
| 649
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
                logger.warning(
                    F"New user input added while unprocessed input was pending: \"{self.new_user_input}\" was "
                    F"overwritten with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
                logger.warning(
                    F"New user input added while unprocessed input \"{self.new_user_input}\" was pending: new input "
                    F"\"{text}\" ignored. Set `overwrite` to True to overwrite the unprocessed user input." )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
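# Hedged usage sketch (assuming the class above keeps its upstream name,
# `Conversation`):
#
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation.mark_processed()                    # moves text to past_user_inputs
#     conversation.append_response("The Big Lebowski")
#     conversation.add_user_input("Is it an action movie?")
#
# iter_texts() then alternates (True, user_text) / (False, bot_text) pairs,
# which is what __repr__ renders above.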
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
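# Usage sketch (illustrative addition, not part of the original file). Assumes the
# pipeline above is registered under the "conversational" task and that the
# "microsoft/DialoGPT-medium" checkpoint is reachable via the Hub.
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
    dialogue = Conversation("What is the capital of France?")
    dialogue = chatbot(dialogue)  # preprocess -> generate -> postprocess
    print(dialogue)  # the __repr__ above renders a "user >> ... / bot >> ..." transcript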
| 649
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Dict = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
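# Behaviour note (illustrative addition): replacing the module in sys.modules with a
# _LazyModule defers the torch-dependent imports until an attribute such as SEWModel
# is first accessed, while the TYPE_CHECKING branch above keeps static type checkers
# and IDEs aware of the real symbols.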
| 649
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
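# Inference sketch (illustrative addition). Mirrors the slow classification test above,
# assuming torch/PIL are installed, the COCO fixture exists at the path below, and the
# public class names MobileNetV2ImageProcessor / MobileNetV2ForImageClassification.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

    processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1001)
    print(model.config.id2label[int(logits.argmax(-1))])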
| 649
| 1
|
import random
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ):
__lowercase : Tuple = a[left_index]
__lowercase : Union[str, Any] = left_index + 1
for j in range(left_index + 1 , lowerCAmelCase_ ):
if a[j] < pivot:
__lowercase , __lowercase : Optional[Any] = a[i], a[j]
i += 1
__lowercase , __lowercase : Optional[int] = a[i - 1], a[left_index]
return i - 1
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ):
if left < right:
__lowercase : List[str] = random.randint(lowerCAmelCase_ , right - 1 )
__lowercase , __lowercase : int = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
__lowercase : Optional[int] = partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
quick_sort_random(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCAmelCase_ , pivot_index + 1 , lowerCAmelCase_ ) # recursive quicksort to the right of the pivot point
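# Worked example (illustrative addition), assuming the two routines above are exposed
# as `partition` and `quick_sort_random`, matching the names used in the recursive
# calls. Random pivot selection makes the O(n^2) worst case unlikely on sorted or
# adversarial inputs; the expected cost stays O(n log n).
#
#     >>> data = [9, 3, 7, 1, 8, 2]
#     >>> quick_sort_random(data, 0, len(data))  # sorts data[0:len(data)] in place
#     >>> data
#     [1, 2, 3, 7, 8, 9]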
def snake_case_ ( ):
__lowercase : Any = input("""Enter numbers separated by a comma:\n""" ).strip()
__lowercase : Tuple = [int(lowerCAmelCase_ ) for item in user_input.split(""",""" )]
quick_sort_random(lowerCAmelCase_ , 0 , len(lowerCAmelCase_ ) )
print(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 649
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( lowerCAmelCase_ : bool = True , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ):
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowercase : List[str] = False
if main_process_only:
__lowercase : Optional[int] = PartialState().local_process_index == 0
return _tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ , disable=lowerCAmelCase_ )
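# Usage sketch (illustrative addition), assuming this wrapper is exported as `tqdm`
# as in accelerate.utils. The first positional argument selects whether only the
# local main process renders the bar; everything else is forwarded to tqdm.auto.tqdm.
if __name__ == "__main__":
    for _ in tqdm(True, range(1_000), desc="demo"):
        pass  # non-main ranks receive disable=True and stay silent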
| 649
| 1
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
'''nielsr/canine-s''': 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
lowerCamelCase : str = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : Union[str, Any] = 0xE_000
lowerCamelCase : List[Any] = 0xE_001
lowerCamelCase : Any = 0xE_002
lowerCamelCase : Dict = 0xE_003
lowerCamelCase : List[str] = 0xE_004
# Maps special codepoints to human-readable names.
lowerCamelCase : Dict[int, str] = {
    # Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCamelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str , __a : Optional[int]=chr(__a ) , __a : List[Any]=chr(__a ) , __a : Dict=chr(__a ) , __a : List[str]=chr(__a ) , __a : List[str]=chr(__a ) , __a : Union[str, Any]=chr(__a ) , __a : List[str]=False , __a : Tuple=2048 , **__a : Dict , ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
__lowercase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__lowercase : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
__lowercase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
__lowercase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , model_max_length=__a , **__a , )
# Creates a mapping for looking up the IDs of special symbols.
__lowercase : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__lowercase : List[Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__lowercase : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__lowercase : Optional[Any] = UNICODE_VOCAB_SIZE
__lowercase : Tuple = len(self._special_codepoints )
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return self._unicode_vocab_size
def lowerCAmelCase ( self : Any , __a : str ) -> List[str]:
"""simple docstring"""
return list(__a )
def lowerCAmelCase ( self : Dict , __a : str ) -> int:
"""simple docstring"""
try:
return ord(__a )
except TypeError:
raise ValueError(F"invalid token: '{token}'" )
def lowerCAmelCase ( self : str , __a : int ) -> str:
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(__a )
except TypeError:
raise ValueError(F"invalid id: {index}" )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[Any] ) -> str:
"""simple docstring"""
return "".join(__a )
def lowerCAmelCase ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : Tuple = [self.sep_token_id]
__lowercase : Optional[int] = [self.cls_token_id]
__lowercase : Optional[Any] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def lowerCAmelCase ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
__lowercase : str = [1] + ([0] * len(__a )) + [1]
if token_ids_a is not None:
result += ([0] * len(__a )) + [1]
return result
def lowerCAmelCase ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = [self.sep_token_id]
__lowercase : Any = [self.cls_token_id]
__lowercase : Tuple = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def lowerCAmelCase ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> str:
"""simple docstring"""
return ()
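# Usage sketch (illustrative addition), assuming the class above is exported as
# CanineTokenizer as in transformers. CANINE tokenises at the Unicode codepoint
# level, so input ids are plain ord() values wrapped in the [CLS]/[SEP] codepoints.
if __name__ == "__main__":
    tokenizer = CanineTokenizer()
    ids = tokenizer("hi")["input_ids"]
    print(ids)  # [57344, 104, 105, 57345] == [0xE000 ([CLS]), 'h', 'i', 0xE001 ([SEP])]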
| 649
|
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : list[int] ):
if not nums:
return 0
__lowercase : Tuple = nums[0]
__lowercase : Tuple = 0
for num in nums[1:]:
__lowercase , __lowercase : List[str] = (
max_excluding + num,
max(lowerCAmelCase_ , lowerCAmelCase_ ),
)
return max(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
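# Worked example (illustrative addition), assuming the function above is exposed as
# maximum_non_adjacent_sum. The pair tracks (best sum including the current element,
# best sum excluding it); for [1, 2, 4, 7] it evolves (1, 0) -> (2, 1) -> (5, 2)
# -> (9, 5), so the answer is max(9, 5) = 9, obtained by picking 2 and 7.
if __name__ == "__main__":
    assert maximum_non_adjacent_sum([1, 2, 4, 7]) == 9
    assert maximum_non_adjacent_sum([]) == 0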
| 649
| 1
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case_ ( *lowerCAmelCase_ : Optional[Any] ):
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : Any = list(lowerCAmelCase_ )
for i in range(len(lowerCAmelCase_ ) ):
__lowercase : Tuple = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case_ ( lowerCAmelCase_ : Exception ):
__lowercase : Dict = [
"""CUDA out of memory.""", # CUDA OOM
"""cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU
"""DefaultCPUAllocator: can't allocate memory""", # CPU OOM
]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case_ ( lowerCAmelCase_ : callable = None , lowerCAmelCase_ : int = 128 ):
if function is None:
return functools.partial(lowerCAmelCase_ , starting_batch_size=lowerCAmelCase_ )
__lowercase : Optional[int] = starting_batch_size
def decorator(*lowerCAmelCase_ : int , **lowerCAmelCase_ : Dict ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
__lowercase : List[str] = list(inspect.signature(lowerCAmelCase_ ).parameters.keys() )
# Guard against user error
if len(lowerCAmelCase_ ) < (len(lowerCAmelCase_ ) + 1):
__lowercase : Dict = """, """.join([F"{arg}={value}" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"Batch size was passed into `{function.__name__}` as the first argument when called."
F"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" )
while True:
if batch_size == 0:
raise RuntimeError("""No executable batch size found, reached zero.""" )
try:
return function(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
except Exception as e:
if should_reduce_batch_size(lowerCAmelCase_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
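# Usage sketch (illustrative addition), assuming the decorator above is exported as
# find_executable_batch_size as in accelerate.utils. The decorated function must take
# the batch size as its first parameter and must NOT be passed one at call time; on a
# recognised OOM the call is retried with the batch size halved until it succeeds.
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=128)
    def training_loop(batch_size):
        print(f"trying batch_size={batch_size}")
        # ... build dataloaders and run an epoch; a CUDA OOM here triggers halving

    training_loop()  # the decorator injects the current batch_size itself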
| 649
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
| 649
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=False ):
__lowercase : Any = """backbone.""" if is_semantic else """"""
__lowercase : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False ):
for i in range(config.num_hidden_layers ):
__lowercase : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__lowercase : List[str] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : Union[str, Any] = q_bias
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__lowercase : str = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
__lowercase : List[str] = gamma_a
__lowercase : Optional[int] = gamma_a
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Tuple = dct.pop(lowerCAmelCase_ )
__lowercase : Tuple = val
def snake_case_ ( ):
__lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : Any = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
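# Invocation sketch (illustrative addition); the script filename and output folder
# below are placeholders, and running this downloads the original .pth checkpoint.
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base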
| 649
| 1
|
import argparse
import os
import re
import packaging.version
lowerCamelCase : Tuple = '''examples/'''
lowerCamelCase : Dict = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowerCamelCase : Dict = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
lowerCamelCase : List[str] = '''README.md'''
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] ):
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowercase : Union[str, Any] = f.read()
__lowercase , __lowercase : Tuple = REPLACE_PATTERNS[pattern]
__lowercase : List[str] = replace.replace("""VERSION""" , lowerCAmelCase_ )
__lowercase : Any = re_pattern.sub(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(lowerCAmelCase_ )
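# Behaviour sketch (illustrative addition). For instance, applying the "init" pattern
# with version "4.30.0" rewrites the line
#     __version__ = "4.30.0.dev0"
# to
#     __version__ = "4.30.0"
# before the file is written back with Unix newlines.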
def snake_case_ ( lowerCAmelCase_ : Dict ):
for folder, directories, fnames in os.walk(lowerCAmelCase_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , pattern="""examples""" )
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not patch:
update_version_in_examples(lowerCAmelCase_ )
def snake_case_ ( ):
__lowercase : Dict = """🤗 Transformers currently provides the following architectures"""
__lowercase : Optional[Any] = """1. Want to contribute a new model?"""
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__lowercase : Tuple = f.readlines()
# Find the start of the list.
__lowercase : Dict = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__lowercase : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__lowercase : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCAmelCase_ )
def snake_case_ ( ):
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__lowercase : int = f.read()
__lowercase : Dict = REPLACE_PATTERNS["""init"""][0].search(lowerCAmelCase_ ).groups()[0]
return packaging.version.parse(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[int]=False ):
__lowercase : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__lowercase : str = default_version.base_version
elif patch:
__lowercase : str = F"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__lowercase : Any = F"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__lowercase : List[Any] = input(F"Which version are you releasing? [{default_version}]" )
if len(lowerCAmelCase_ ) == 0:
__lowercase : Dict = default_version
print(F"Updating version to {version}." )
global_version_update(lowerCAmelCase_ , patch=lowerCAmelCase_ )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def snake_case_ ( ):
__lowercase : Tuple = get_version()
__lowercase : Any = F"{current_version.major}.{current_version.minor + 1}.0.dev0"
__lowercase : Optional[int] = current_version.base_version
# Check with the user we got that right.
__lowercase : List[str] = input(F"Which version are we developing now? [{dev_version}]" )
if len(lowerCAmelCase_ ) == 0:
__lowercase : int = dev_version
print(F"Updating version to {version}." )
global_version_update(lowerCAmelCase_ )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
lowerCamelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 649
|
from torch import nn
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , __a : int , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
__lowercase : int = class_size
__lowercase : int = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
__lowercase : str = nn.Linear(__a , __a )
def lowerCAmelCase ( self : Tuple , __a : int ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.mlp(__a )
return logits
| 649
| 1
|
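The module above is a single linear classification head that maps an embedding to class logits (the commented-out lines hint at an earlier two-layer variant). A short usage sketch with assumed sizes:

import torch
from torch import nn

class ClassificationHead(nn.Module):
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.mlp = nn.Linear(embed_size, class_size)   # hidden state -> class logits

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        return self.mlp(hidden_state)

head = ClassificationHead(class_size=4, embed_size=768)
logits = head(torch.randn(2, 768))                     # (batch, embed) -> (batch, classes)
assert logits.shape == (2, 4)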
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCamelCase : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
_A : int = 10000
_A : Optional[List[str]] = None
_A : Optional[datasets.Features] = None
class lowerCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_A : Dict = ParquetConfig
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] ) -> str:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
__lowercase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__a , (str, list, tuple) ):
__lowercase : Any = data_files
if isinstance(__a , __a ):
__lowercase : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase : Any = [dl_manager.iter_files(__a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__lowercase : Any = []
for split_name, files in data_files.items():
if isinstance(__a , __a ):
__lowercase : str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__lowercase : str = [dl_manager.iter_files(__a ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__a ):
with open(__a , """rb""" ) as f:
__lowercase : Optional[int] = datasets.Features.from_arrow_schema(pq.read_schema(__a ) )
break
splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"""files""": files} ) )
return splits
def lowerCAmelCase ( self : Dict , __a : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__lowercase : Dict = table_cast(__a , self.info.features.arrow_schema )
return pa_table
def lowerCAmelCase ( self : int , __a : Any ) -> int:
"""simple docstring"""
__lowercase : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(__a ) ):
with open(__a , """rb""" ) as f:
__lowercase : str = pq.ParquetFile(__a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__lowercase : Union[str, Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(__a )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(__a )}: {e}" )
raise
| 649
|
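The builder above streams each Parquet file in record batches instead of materializing whole tables, yielding one small Arrow table per batch. A runnable sketch of that batched-read pattern with pyarrow (the file name and batch size are made up for the example):

import pyarrow as pa
import pyarrow.parquet as pq

# Write a tiny Parquet file so the example runs end to end.
pq.write_table(pa.table({"x": list(range(10))}), "tiny.parquet")

parquet_file = pq.ParquetFile("tiny.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=4)):
    table = pa.Table.from_batches([record_batch])   # one Arrow table per batch
    print(batch_idx, table.num_rows)                # prints 0 4, 1 4, 2 2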
import fire
from utils import calculate_rouge, save_json
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : str ):
__lowercase : Tuple = [x.strip() for x in open(lowerCAmelCase_ ).readlines()]
__lowercase : Dict = [x.strip() for x in open(lowerCAmelCase_ ).readlines()][: len(lowerCAmelCase_ )]
__lowercase : Tuple = calculate_rouge(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
if save_path is not None:
save_json(lowerCAmelCase_ , lowerCAmelCase_ , indent=lowerCAmelCase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 649
| 1
|
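calculate_rouge above comes from the example's local utils module, so its implementation is not shown here. As a hedged stand-in, the same hypothesis-vs-reference comparison can be done directly with the rouge_score package (assumed installed via pip install rouge-score):

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
score = scorer.score("the cat sat on the mat", "a cat sat on a mat")
print(round(score["rougeL"].fmeasure, 3))   # F-measure of the LCS-based match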
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Union[str, Any] = '''roberta-prelayernorm'''
def __init__( self : List[str] , __a : List[str]=50265 , __a : str=768 , __a : Optional[int]=12 , __a : List[Any]=12 , __a : Tuple=3072 , __a : int="gelu" , __a : List[str]=0.1 , __a : List[str]=0.1 , __a : Dict=512 , __a : Union[str, Any]=2 , __a : List[Any]=0.02 , __a : Optional[int]=1E-12 , __a : Union[str, Any]=1 , __a : List[str]=0 , __a : List[Any]=2 , __a : int="absolute" , __a : Tuple=True , __a : List[Any]=None , **__a : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__lowercase : str = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : Optional[int] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : List[str] = hidden_act
__lowercase : Dict = intermediate_size
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : Optional[int] = attention_probs_dropout_prob
__lowercase : Dict = max_position_embeddings
__lowercase : Any = type_vocab_size
__lowercase : List[Any] = initializer_range
__lowercase : List[Any] = layer_norm_eps
__lowercase : List[str] = position_embedding_type
__lowercase : int = use_cache
__lowercase : List[Any] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowercase : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 649
|
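The OnnxConfig subclass above only declares which input dimensions are dynamic; the exporter consumes that mapping. A minimal sketch of the same idea with torch.onnx.export directly (the toy model and file name are assumptions for illustration, and torch's ONNX exporter is assumed available):

import torch
from torch import nn

model = nn.Embedding(10, 4)                    # stands in for a real encoder
dummy = torch.zeros(1, 7, dtype=torch.long)    # (batch, sequence)
torch.onnx.export(
    model,
    (dummy,),
    "tiny.onnx",
    input_names=["input_ids"],
    dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}},  # same axes as the mapping above
)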
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
download_parser.set_defaults(func=__a )
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 649
| 1
|
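The command class above registers itself on a shared parser with add_parser and dispatches through set_defaults(func=...). A stripped-down sketch of that subcommand pattern, with a dummy handler in place of the real download:

from argparse import ArgumentParser

def run_download(args):
    print(f"would download {args.model} (force={args.force})")

parser = ArgumentParser()
subparsers = parser.add_subparsers()
download = subparsers.add_parser("download")
download.add_argument("model")
download.add_argument("--force", action="store_true")
download.set_defaults(func=run_download)

args = parser.parse_args(["download", "bert-base-uncased", "--force"])
args.func(args)   # dispatch to the handler chosen by the subcommand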
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def snake_case_ ( lowerCAmelCase_ : bool = True , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ):
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
__lowercase : List[str] = False
if main_process_only:
__lowercase : Optional[int] = PartialState().local_process_index != 0  # disable the bar everywhere except local rank 0
return _tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ , disable=lowerCAmelCase_ )
| 649
|
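The wrapper above keeps the progress bar only on the local main process. A framework-free sketch of the same guard that reads the rank from an environment variable instead of accelerate's PartialState (the variable name LOCAL_RANK is an assumption, matching torchrun's convention):

import os
from tqdm.auto import tqdm

def main_process_tqdm(*args, main_process_only: bool = True, **kwargs):
    # Disable the bar unless this is local rank 0 (or the guard is turned off).
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    disable = main_process_only and local_rank != 0
    return tqdm(*args, disable=disable, **kwargs)

for _ in main_process_tqdm(range(3)):
    pass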
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , __a : List[str] , __a : Optional[int]=16 , __a : Optional[Any]=13 , __a : str=7 , __a : List[str]=14 , __a : Any=10 , __a : str=19 , __a : int=5 , __a : Any=4 , __a : List[Any]=True , __a : Tuple=16 , __a : Dict=2 , __a : Tuple=4 , __a : int=4 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : List[str]=0.1 , __a : int=[1, 2, 3, 4, 5] , __a : str=25 , __a : Any=5 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = d_model
__lowercase : Dict = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = prediction_length
__lowercase : List[str] = context_length
__lowercase : Any = cardinality
__lowercase : str = num_time_features
__lowercase : Optional[int] = lags_sequence
__lowercase : Optional[Any] = embedding_dimension
__lowercase : List[Any] = is_training
__lowercase : List[str] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : int = hidden_act
__lowercase : str = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : str = context_length
__lowercase : int = prediction_length + label_length
__lowercase : Union[str, Any] = label_length
__lowercase : Optional[int] = moving_average
__lowercase : Optional[Any] = autocorrelation_factor
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase ( self : Tuple , __a : str ) -> int:
"""simple docstring"""
__lowercase : Any = config.context_length + max(config.lags_sequence )
__lowercase : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowercase : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] )
__lowercase : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowercase : Dict = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowercase : str = floats_tensor([self.batch_size, config.prediction_length] )
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_config()
__lowercase : Any = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Tuple , __a : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel(config=__a ).to(__a ).eval()
__lowercase : Optional[int] = model(**__a )
__lowercase : Dict = outputs.encoder_last_hidden_state
__lowercase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : List[str] = model.get_encoder()
encoder.save_pretrained(__a )
__lowercase : List[str] = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase : Any = model.create_network_inputs(**__a )
__lowercase , __lowercase : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowercase : Optional[Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowercase : Union[str, Any] = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__lowercase : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowercase : Optional[int] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowercase : Any = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowercase : Dict = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : Optional[Any] = model.get_decoder()
decoder.save_pretrained(__a )
__lowercase : Tuple = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowercase : str = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
| 649
| 1
|
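The tests above exercise Autoformer's series decomposition, which splits an input into a trend part (a moving average) and a seasonal part (the residual). A minimal sketch of that split, assuming a simple odd kernel and edge padding rather than the model's exact layer:

import torch

def decompose(x: torch.Tensor, kernel: int = 5):
    # x: (batch, length, channels); trend = moving average, seasonal = residual.
    pad = kernel // 2
    front = x[:, :1, :].repeat(1, pad, 1)    # edge-pad so the output keeps its length
    back = x[:, -1:, :].repeat(1, pad, 1)
    padded = torch.cat([front, x, back], dim=1)
    trend = torch.nn.functional.avg_pool1d(
        padded.transpose(1, 2), kernel_size=kernel, stride=1
    ).transpose(1, 2)
    return x - trend, trend                  # (seasonal, trend)

x = torch.randn(2, 16, 3)
seasonal, trend = decompose(x)
assert seasonal.shape == trend.shape == x.shape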
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : Optional[Any] , __a : List[str]=13 , __a : Dict=7 , __a : Optional[Any]=True , __a : Tuple=True , __a : Optional[Any]=True , __a : Dict=True , __a : Optional[int]=99 , __a : Tuple=64 , __a : Any=5 , __a : Dict=4 , __a : Union[str, Any]=37 , __a : List[str]="gelu" , __a : int=0.1 , __a : Any=0.1 , __a : int=512 , __a : Union[str, Any]=16 , __a : Optional[Any]=2 , __a : Optional[int]=0.02 , __a : Dict=3 , __a : List[str]=4 , __a : Any=None , ) -> int:
"""simple docstring"""
__lowercase : str = parent
__lowercase : Tuple = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : Optional[Any] = is_training
__lowercase : List[Any] = use_input_mask
__lowercase : Union[str, Any] = use_token_type_ids
__lowercase : List[str] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : Optional[Any] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : List[Any] = hidden_act
__lowercase : Dict = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : int = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : int = initializer_range
__lowercase : Dict = num_labels
__lowercase : Dict = num_choices
__lowercase : Optional[int] = scope
__lowercase : Optional[Any] = vocab_size - 1
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Union[str, Any] = None
if self.use_input_mask:
__lowercase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : List[Any] = None
if self.use_labels:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase : Tuple = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : List[Any] , __a : Dict ) -> int:
"""simple docstring"""
__lowercase : Tuple = GPTNeoXModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a )
__lowercase : Any = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : int , __a : Any , __a : Optional[int] , __a : str ) -> int:
"""simple docstring"""
__lowercase : List[str] = True
__lowercase : List[str] = GPTNeoXModel(__a )
model.to(__a )
model.eval()
__lowercase : Union[str, Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , __a : str , __a : Tuple , __a : Any , __a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Any , __a : Optional[Any] , __a : List[str] , __a : str , __a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
__lowercase : List[str] = GPTNeoXForQuestionAnswering(__a )
model.to(__a )
model.eval()
__lowercase : List[str] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : List[str] , __a : Any , __a : List[Any] , __a : Union[str, Any] , __a : Dict ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = self.num_labels
__lowercase : Optional[Any] = GPTNeoXForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Dict = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[str] , __a : Union[str, Any] , __a : Any , __a : List[str] , __a : Any ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.num_labels
__lowercase : Optional[int] = GPTNeoXForTokenClassification(__a )
model.to(__a )
model.eval()
__lowercase : Union[str, Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Tuple , __a : str , __a : Optional[int] , __a : Any ) -> Any:
"""simple docstring"""
__lowercase : Any = True
__lowercase : Tuple = GPTNeoXForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
__lowercase : int = model(__a , attention_mask=__a , use_cache=__a )
__lowercase : int = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__lowercase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
__lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase : Union[str, Any] = model(__a , attention_mask=__a , output_hidden_states=__a )
__lowercase : Optional[int] = output_from_no_past["""hidden_states"""][0]
__lowercase : List[str] = model(
__a , attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["""hidden_states"""][0]
# select random slice
__lowercase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Dict = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : Optional[Any] = config_and_inputs
__lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
_A : List[Any] = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : Any = False
_A : Optional[Any] = False
_A : List[str] = False
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[Any] = GPTNeoXModelTester(self )
__lowercase : int = ConfigTester(self , config_class=__a , hidden_size=64 , num_attention_heads=8 )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase : Dict = None
self.model_tester.create_and_check_model_as_decoder(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__a , __a , __a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def lowerCAmelCase ( self : int , __a : int ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Tuple = ids_tensor([1, 10] , config.vocab_size )
__lowercase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase : List[Any] = GPTNeoXModel(__a )
original_model.to(__a )
original_model.eval()
__lowercase : Union[str, Any] = original_model(__a ).last_hidden_state
__lowercase : Tuple = original_model(__a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__lowercase : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
__lowercase : str = GPTNeoXModel(__a )
scaled_model.to(__a )
scaled_model.eval()
__lowercase : List[Any] = scaled_model(__a ).last_hidden_state
__lowercase : List[str] = scaled_model(__a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__a , __a , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__a , __a , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__a , __a , atol=1E-5 ) )
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : int = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
__lowercase : List[Any] = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__a )
__lowercase : Union[str, Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__a )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__lowercase : Any = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
__lowercase : Tuple = model.generate(**__a , do_sample=__a , max_new_tokens=20 )
__lowercase : List[Any] = tokenizer.batch_decode(__a )[0]
self.assertEqual(__a , __a )
| 649
|
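The rope_scaling test above expects linear position interpolation to change even short-input outputs, while dynamic scaling leaves them alone. The heart of linear scaling is just dividing positions by the factor before computing rotary angles; a standalone sketch:

import torch

def rope_angles(length: int, dim: int, base: float = 10000.0, factor: float = 1.0):
    # factor > 1 compresses positions, stretching the usable context (linear scaling).
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(length).float() / factor
    return torch.outer(positions, inv_freq)    # (length, dim // 2) rotation angles

plain = rope_angles(10, 8)
scaled = rope_angles(10, 8, factor=10.0)
assert not torch.allclose(plain, scaled)       # linear scaling shifts short inputs too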
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , __a : Tuple=2000 , __a : List[str]=0.1 , __a : str=20 , __a : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
__lowercase : int = None
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
__lowercase : List[str] = torch.linspace(1 , self.config.sampling_eps , __a , device=__a )
def lowerCAmelCase ( self : Tuple , __a : List[Any] , __a : Tuple , __a : int , __a : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__lowercase : Dict = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase : Optional[Any] = std.unsqueeze(-1 )
__lowercase : List[Any] = -score / std
# compute the reverse-SDE drift and diffusion terms
__lowercase : Dict = -1.0 / len(self.timesteps )
__lowercase : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase : List[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase : Union[str, Any] = beta_t.unsqueeze(-1 )
__lowercase : List[str] = -0.5 * beta_t * x
__lowercase : int = torch.sqrt(__a )
__lowercase : Union[str, Any] = drift - diffusion**2 * score
__lowercase : Optional[Any] = x + drift * dt
# add noise
__lowercase : List[str] = randn_tensor(x.shape , layout=x.layout , generator=__a , device=x.device , dtype=x.dtype )
__lowercase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
| 649
| 1
|
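Each step above is one reverse-time Euler-Maruyama update: a deterministic drift built from the score, plus Gaussian noise scaled by sqrt(-dt). A scalar toy sketch of that integrator (the beta value is a placeholder, not the scheduler's configured VP-SDE coefficients):

import math
import torch

def euler_maruyama_step(x, score, beta_t, dt, generator=None):
    # x <- x + (drift - diffusion**2 * score) * dt + diffusion * sqrt(-dt) * noise
    diffusion = math.sqrt(beta_t)
    drift = -0.5 * beta_t * x - diffusion**2 * score
    noise = torch.randn(x.shape, generator=generator)
    return x + drift * dt + diffusion * math.sqrt(-dt) * noise

x = torch.zeros(4)
x = euler_maruyama_step(x, score=torch.zeros(4), beta_t=0.1, dt=-1e-3)
print(x)   # pure noise here, since drift and score are zero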
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def snake_case_ ( lowerCAmelCase_ : List[str] ):
__lowercase : Union[str, Any] = args.pruning_method
__lowercase : Any = args.threshold
__lowercase : Tuple = args.model_name_or_path.rstrip("""/""" )
__lowercase : Any = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}" )
__lowercase : Optional[Any] = torch.load(os.path.join(lowerCAmelCase_ , """pytorch_model.bin""" ) )
__lowercase : str = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__lowercase : Optional[int] = tensor
print(F"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
__lowercase : List[Any] = tensor
print(F"Copied layer {name}" )
elif "bias" in name:
__lowercase : str = tensor
print(F"Copied layer {name}" )
else:
if pruning_method == "magnitude":
__lowercase : Optional[int] = MagnitudeBinarizer.apply(inputs=lowerCAmelCase_ , threshold=lowerCAmelCase_ )
__lowercase : int = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__lowercase : List[str] = name[:-6]
__lowercase : Tuple = model[F"{prefix_}mask_scores"]
__lowercase : Any = TopKBinarizer.apply(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : int = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__lowercase : str = name[:-6]
__lowercase : Optional[int] = model[F"{prefix_}mask_scores"]
__lowercase : Dict = ThresholdBinarizer.apply(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : str = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__lowercase : Optional[Any] = name[:-6]
__lowercase : List[Any] = model[F"{prefix_}mask_scores"]
__lowercase , __lowercase : Dict = -0.1, 1.1
__lowercase : Tuple = torch.sigmoid(lowerCAmelCase_ )
__lowercase : str = s * (r - l) + l
__lowercase : int = s_bar.clamp(min=0.0 , max=1.0 )
__lowercase : Tuple = tensor * mask
print(F"Pruned layer {name}" )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
__lowercase : str = os.path.join(
os.path.dirname(lowerCAmelCase_ ) , F"bertarized_{os.path.basename(lowerCAmelCase_ )}" )
if not os.path.isdir(lowerCAmelCase_ ):
shutil.copytree(lowerCAmelCase_ , lowerCAmelCase_ )
print(F"\nCreated folder {target_model_path}" )
torch.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \\tau against which the sigmoid scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
lowerCamelCase : Dict = parser.parse_args()
main(args)
| 649
|
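The magnitude branch above multiplies each weight tensor by a binary mask from the emmental binarizers. A dependency-free sketch of the same mask-and-multiply step, using a plain keep-top-k-by-magnitude rule as an assumed stand-in for MagnitudeBinarizer:

import torch

def magnitude_prune(tensor: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Keep the largest-|w| entries, zero out the rest.
    n = tensor.numel()
    k = max(1, int(n * keep_ratio))
    threshold = tensor.abs().flatten().kthvalue(n - k + 1).values  # k-th largest magnitude
    mask = (tensor.abs() >= threshold).to(tensor.dtype)
    return tensor * mask

w = torch.randn(4, 4)
pruned = magnitude_prune(w, keep_ratio=0.25)
print((pruned != 0).float().mean())   # roughly 0.25 of the weights survive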
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so its average over the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
| 649
| 1
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , __a : List[Any] , __a : Optional[int]=13 , __a : Tuple=7 , __a : Any=True , __a : Dict=True , __a : str=True , __a : Union[str, Any]=True , __a : Dict=99 , __a : List[str]=32 , __a : int=5 , __a : Union[str, Any]=4 , __a : Tuple=37 , __a : List[str]="gelu" , __a : List[Any]=0.1 , __a : Optional[int]=0.1 , __a : Optional[Any]=512 , __a : Tuple=16 , __a : List[Any]=2 , __a : Optional[int]=0.02 , __a : Dict=4 , ) -> int:
"""simple docstring"""
__lowercase : List[Any] = parent
__lowercase : Optional[int] = batch_size
__lowercase : Union[str, Any] = seq_length
__lowercase : str = is_training
__lowercase : List[str] = use_attention_mask
__lowercase : Any = use_token_type_ids
__lowercase : Optional[int] = use_labels
__lowercase : Union[str, Any] = vocab_size
__lowercase : str = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : List[str] = intermediate_size
__lowercase : Any = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : List[str] = type_sequence_label_size
__lowercase : Optional[int] = initializer_range
__lowercase : Optional[int] = num_choices
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : List[str] = None
if self.use_attention_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[int] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : Tuple = config_and_inputs
__lowercase : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase : str = model_class_name.from_pretrained("""albert-base-v2""" )
__lowercase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Dict = FlaxAlbertModel.from_pretrained("""albert-base-v2""" )
__lowercase : Any = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowercase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowercase : int = model(__a , attention_mask=__a )[0]
__lowercase : Dict = (1, 11, 768)
self.assertEqual(output.shape , __a )
__lowercase : List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
| 649
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
| 649
| 1
|
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
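# Hedged usage note: the odd-even ("brick") transposition sort above runs in
# O(n^2) comparisons, e.g. odd_even_sort([5, 3, 8, 1, 9, 2]) returns [1, 2, 3, 5, 8, 9].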
| 649
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
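# Hedged usage sketch (checkpoint id is illustrative, not from this file):
# instantiating the deprecated class still works but emits the FutureWarning above.
# extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-patch16-224")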
| 649
| 1
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 649
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
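# Hedged usage sketch distilled from the integration test above (model id taken
# from that test): decode CTC logits into text plus per-word offsets.
# processor = WavaVecaProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
# output = processor.decode(logits[0], output_word_offsets=True)
# print(output.text, output.word_offsets)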
| 649
| 1
|
from torch import nn
class ClassificationHead(nn.Module):
    """Single linear head mapping transformer embeddings to class logits."""

    def __init__(self, class_size: int, embed_size: int) -> None:
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
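if __name__ == "__main__":
    # Minimal usage sketch (sizes are illustrative, not from the original file):
    # score a batch of two 768-dim embeddings against 4 classes.
    import torch

    head = ClassificationHead(class_size=4, embed_size=768)
    logits = head(torch.randn(2, 768))
    assert logits.shape == (2, 4)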
| 649
|
def and_gate(input_a: int, input_b: int) -> int:
    """AND gate: outputs 1 only when both inputs are 1."""
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
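# Hedged extension sketch (not part of the original module): other gates
# compose from and_gate, e.g. a NAND gate:
#     def nand_gate(input_a: int, input_b: int) -> int:
#         return int(not and_gate(input_a, input_b))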
| 649
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
lowerCamelCase : List[Any] = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
lowerCamelCase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
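# Hedged illustration of the helpers above: for a module containing
# "import .unet" and "from .schedulers import DDIMScheduler",
# get_relative_imports returns ["unet", "schedulers"] (order unspecified,
# since a set is used to de-duplicate).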
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, ):
__lowercase : Dict = str(lowerCAmelCase_ )
__lowercase : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
__lowercase : Dict = module_file_or_url
__lowercase : Union[str, Any] = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
__lowercase : List[str] = get_diffusers_versions()
# cut ".dev0"
__lowercase : List[str] = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
__lowercase : Optional[int] = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
__lowercase : List[Any] = F"v{revision}"
elif revision == "main":
__lowercase : Tuple = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
__lowercase : Optional[Any] = COMMUNITY_PIPELINES_URL.format(revision=lowerCAmelCase_ , pipeline=lowerCAmelCase_ )
try:
__lowercase : Optional[int] = cached_download(
lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , )
__lowercase : Any = """git"""
__lowercase : Tuple = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
__lowercase : Optional[int] = hf_hub_download(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , )
__lowercase : List[str] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
__lowercase : str = check_imports(lowerCAmelCase_ )
# Now we move the module inside our cached dynamic modules.
__lowercase : Tuple = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCAmelCase_ )
__lowercase : List[Any] = Path(lowerCAmelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCAmelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
__lowercase : Any = F"{module_needed}.py"
shutil.copy(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : int = use_auth_token
elif use_auth_token is True:
__lowercase : Optional[int] = HfFolder.get_token()
else:
__lowercase : Union[str, Any] = None
__lowercase : Optional[Any] = model_info(lowerCAmelCase_ , revision=lowerCAmelCase_ , token=lowerCAmelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__lowercase : Union[str, Any] = submodule_path / commit_hash
__lowercase : int = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCAmelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCAmelCase_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCAmelCase_ , F"{module_needed}.py" , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
return os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
def get_class_from_dynamic_module( pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs, ):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
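# Hedged usage sketch of get_class_from_dynamic_module (pipeline name is
# illustrative, taken from the diffusers community examples):
# pipeline_cls = get_class_from_dynamic_module(
#     "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
# )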
| 649
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
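# Hedged CLI sketch: main() backs the `accelerate config` entry point, e.g.
#     $ accelerate config --config_file ./my_config.yaml
# which runs the interactive prompts and writes the answers to the given file.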
| 649
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_clip_fast'''] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_clip'''] = ['''CLIPFeatureExtractor''']
    _import_structure['''image_processing_clip'''] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clip'''] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : list[str] | None = None ):
__lowercase : Tuple = word_bank or []
# create a table
__lowercase : int = len(lowerCAmelCase_ ) + 1
__lowercase : list[list[list[str]]] = []
for _ in range(lowerCAmelCase_ ):
table.append([] )
# seed value
    __lowercase : Dict = [[]] # the empty string has exactly one (empty) combination
# iterate through the indices
for i in range(lowerCAmelCase_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase_ )] == word:
__lowercase : list[list[str]] = [
[word, *way] for way in table[i]
]
                # add the word to every combination the current position holds
                # now, push that combination to table[i + len(word)]
table[i + len(lowerCAmelCase_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase_ )]:
combination.reverse()
return table[len(lowerCAmelCase_ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
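# Hand-derived trace (illustrative) of the table for all_construct("aa", ["a"]):
#   table[0] = [[]]                       # seed: one way to build the empty prefix
#   i = 0: "a" == target[0:1] -> table[1] = [["a"]]
#   i = 1: "a" == target[1:2] -> table[2] = [["a", "a"]]
# so all_construct("aa", ["a"]) returns [["a", "a"]]: a single decomposition.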
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """width_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : List[Any] , __a : Tuple=13 , __a : List[str]=64 , __a : List[str]=2 , __a : str=3 , __a : Union[str, Any]="swish" , __a : List[str]=3 , __a : str=32 , __a : List[str]=0.1 , __a : List[str]=0.02 , __a : Any=True , __a : Optional[Any]=True , __a : List[Any]=10 , __a : Any=None , __a : Optional[Any]=0.25 , __a : Optional[int]=0.0 , __a : Dict=0.0 , ) -> List[str]:
"""simple docstring"""
__lowercase : int = parent
__lowercase : Union[str, Any] = batch_size
__lowercase : str = image_size
__lowercase : Optional[Any] = patch_size
__lowercase : Any = num_channels
__lowercase : int = make_divisible(512 * width_multiplier , divisor=8 )
__lowercase : Union[str, Any] = hidden_act
__lowercase : Optional[Any] = conv_kernel_size
__lowercase : Optional[int] = output_stride
__lowercase : List[Any] = classifier_dropout_prob
__lowercase : Tuple = use_labels
__lowercase : List[Any] = is_training
__lowercase : Dict = num_labels
__lowercase : Optional[int] = initializer_range
__lowercase : int = scope
__lowercase : Optional[int] = width_multiplier
__lowercase : Any = ffn_dropout
__lowercase : Optional[int] = attn_dropout
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Any = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCAmelCase ( self : Optional[int] , __a : Tuple , __a : Tuple , __a : Any , __a : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = MobileViTVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Optional[Any] , __a : Optional[Any] , __a : str , __a : Optional[Any] , __a : int ) -> str:
"""simple docstring"""
__lowercase : Any = self.num_labels
__lowercase : Dict = MobileViTVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , __a : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.num_labels
__lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : Dict = config_and_inputs
__lowercase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : str = False
_A : List[str] = False
_A : Any = False
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = MobileViTVaModelTester(self )
__lowercase : List[str] = MobileViTVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Optional[Any] = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
def check_hidden_states_output(__a : Tuple , __a : Any , __a : List[str] ):
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Optional[int] = outputs.hidden_states
__lowercase : Optional[int] = 5
self.assertEqual(len(__a ) , __a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__lowercase : List[str] = 2
for i in range(len(__a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Union[str, Any] = MobileViTVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__a )
__lowercase : Optional[int] = self.default_image_processor
__lowercase : List[Any] = prepare_img()
__lowercase : Dict = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : List[Any] = model(**__a )
# verify the logits
__lowercase : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : Union[str, Any] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Tuple = model.to(__a )
__lowercase : List[str] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Union[str, Any] = prepare_img()
__lowercase : Union[str, Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Union[str, Any] = outputs.logits
# verify the logits
__lowercase : str = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __a )
__lowercase : List[str] = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Dict = model.to(__a )
__lowercase : str = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__lowercase : Tuple = prepare_img()
__lowercase : List[str] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : List[str] = model(**__a )
__lowercase : Optional[Any] = outputs.logits.detach().cpu()
__lowercase : Any = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(50, 60)] )
__lowercase : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __a )
__lowercase : Dict = image_processor.post_process_semantic_segmentation(outputs=__a )
__lowercase : int = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __a )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return int((input_a, input_a).count(1 ) != 0 )
def snake_case_ ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCamelCase : List[str] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , __a : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
        __lowercase : Tuple = torchvision.models.resnet152(pretrained=__a )
__lowercase : int = list(model.children() )[:-2]
__lowercase : Tuple = nn.Sequential(*__a )
        __lowercase : Optional[Any] = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowerCAmelCase ( self : str , __a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = self.pool(self.model(__a ) )
__lowercase : Optional[Any] = torch.flatten(__a , start_dim=2 )
__lowercase : Dict = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
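# Shape sketch (illustrative, assuming args.num_image_embeds == 3 so the
# pooling grid is (3, 1)): a (B, 3, 224, 224) batch goes through the truncated
# ResNet to (B, 2048, 7, 7), is pooled to (B, 2048, 3, 1), flattened to
# (B, 2048, 3), and transposed to (B, 3, 2048) -- i.e. N = 3 image "tokens"
# of width 2048 per sample, matching the BxNx2048 comment above.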
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Any , __a : Optional[Any] , __a : Optional[int] , __a : int , __a : int ) -> List[str]:
"""simple docstring"""
__lowercase : str = [json.loads(__a ) for l in open(__a )]
__lowercase : Optional[int] = os.path.dirname(__a )
__lowercase : Dict = tokenizer
__lowercase : Optional[Any] = labels
__lowercase : Dict = len(__a )
__lowercase : Optional[int] = max_seq_length
__lowercase : Optional[int] = transforms
def __len__( self : int ) -> Optional[Any]:
"""simple docstring"""
return len(self.data )
def __getitem__( self : Tuple , __a : int ) -> List[Any]:
"""simple docstring"""
__lowercase : int = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=__a ) )
__lowercase , __lowercase , __lowercase : Union[str, Any] = sentence[0], sentence[1:-1], sentence[-1]
__lowercase : Any = sentence[: self.max_seq_length]
__lowercase : str = torch.zeros(self.n_classes )
__lowercase : Optional[Any] = 1
__lowercase : int = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
__lowercase : Optional[int] = self.transforms(__a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : List[Any] = [len(row["""sentence"""] ) for row in batch]
__lowercase , __lowercase : Union[str, Any] = len(lowerCAmelCase_ ), max(lowerCAmelCase_ )
__lowercase : List[Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.long )
__lowercase : Optional[Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ):
__lowercase : Tuple = input_row["""sentence"""]
__lowercase : Dict = 1
__lowercase : Dict = torch.stack([row["""image"""] for row in batch] )
__lowercase : Union[str, Any] = torch.stack([row["""label"""] for row in batch] )
__lowercase : List[Any] = torch.stack([row["""image_start_token"""] for row in batch] )
__lowercase : Union[str, Any] = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
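# Collation sketch (illustrative of what this collate function intends): for a
# batch whose sentences have lengths [5, 3], text_tensor is a zero-padded
# (2, 5) LongTensor and mask_tensor is
#   [[1, 1, 1, 1, 1],
#    [1, 1, 1, 0, 0]]
# so downstream attention only covers real tokens.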
def snake_case_ ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def snake_case_ ( ):
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
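# Example (illustrative; the transform factory above is referred to here by an
# assumed name): applying it to an RGB PIL image yields a normalized float
# tensor of shape (3, 224, 224).
#
#   tfm = get_image_transforms()
#   tensor = tfm(Image.open("poster.jpg").convert("RGB"))   # (3, 224, 224)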
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] ):
# Load configuration defined in the metadata file
with open(lowerCAmelCase_ ) as metadata_file:
__lowercase : Tuple = json.load(lowerCAmelCase_ )
__lowercase : Dict = LukeConfig(use_entity_aware_attention=lowerCAmelCase_ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
__lowercase : str = torch.load(lowerCAmelCase_ , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
__lowercase : str = load_original_entity_vocab(lowerCAmelCase_ )
# add an entry for [MASK2]
__lowercase : List[str] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__lowercase : Tuple = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
__lowercase : str = AddedToken("""<ent>""" , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ )
__lowercase : Any = AddedToken("""<ent2>""" , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """r""" ) as f:
__lowercase : Union[str, Any] = json.load(lowerCAmelCase_ )
__lowercase : List[str] = """MLukeTokenizer"""
with open(os.path.join(lowerCAmelCase_ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = MLukeTokenizer.from_pretrained(lowerCAmelCase_ )
# Initialize the embeddings of the special tokens
__lowercase : List[Any] = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
__lowercase : Tuple = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
__lowercase : Any = state_dict["""embeddings.word_embeddings.weight"""]
__lowercase : str = word_emb[ent_init_index].unsqueeze(0 )
__lowercase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
__lowercase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__lowercase : Optional[Any] = state_dict[bias_name]
__lowercase : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
__lowercase : int = decoder_bias[enta_init_index].unsqueeze(0 )
__lowercase : List[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowercase : List[str] = F"encoder.layer.{layer_index}.attention.self."
__lowercase : Optional[Any] = state_dict[prefix + matrix_name]
__lowercase : Tuple = state_dict[prefix + matrix_name]
__lowercase : Any = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowercase : List[str] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
__lowercase : List[str] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
__lowercase : Optional[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__lowercase : Optional[Any] = state_dict["""entity_predictions.bias"""]
__lowercase : Optional[Any] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
__lowercase : Tuple = torch.cat([entity_prediction_bias, entity_mask_bias] )
__lowercase : Any = LukeForMaskedLM(config=lowerCAmelCase_ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
__lowercase : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
__lowercase : List[str] = state_dict[key]
else:
__lowercase : Union[str, Any] = state_dict[key]
__lowercase , __lowercase : Union[str, Any] = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
if set(lowerCAmelCase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(lowerCAmelCase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__lowercase : List[str] = MLukeTokenizer.from_pretrained(lowerCAmelCase_ , task="""entity_classification""" )
__lowercase : Optional[Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
__lowercase : Any = (0, 9)
__lowercase : Dict = tokenizer(lowerCAmelCase_ , entity_spans=[span] , return_tensors="""pt""" )
__lowercase : int = model(**lowerCAmelCase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowercase : List[str] = torch.Size((1, 33, 768) )
__lowercase : Dict = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowercase : Any = torch.Size((1, 1, 768) )
__lowercase : Optional[Any] = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__lowercase : List[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase_ )
__lowercase : Optional[Any] = """Tokyo is the capital of <mask>."""
__lowercase : Optional[Any] = (24, 30)
__lowercase : Dict = tokenizer(lowerCAmelCase_ , entity_spans=[span] , return_tensors="""pt""" )
__lowercase : Tuple = model(**lowerCAmelCase_ )
__lowercase : Any = encoding["""input_ids"""][0].tolist()
__lowercase : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
__lowercase : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase_ )
__lowercase : List[Any] = outputs.entity_logits[0][0].argmax().item()
__lowercase : List[str] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCAmelCase_ ) )
model.save_pretrained(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : List[str] ):
__lowercase : Any = ["""[MASK]""", """[PAD]""", """[UNK]"""]
__lowercase : Tuple = [json.loads(lowerCAmelCase_ ) for line in open(lowerCAmelCase_ )]
__lowercase : Optional[int] = {}
for entry in data:
__lowercase : Tuple = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__lowercase : List[Any] = entity_id
break
__lowercase : Optional[int] = F"{language}:{entity_name}"
__lowercase : Any = entity_id
return new_mapping
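# Input format sketch (illustrative): each line of the entity vocab file is a
# JSON object such as
#   {"id": 10, "entities": [["Japan", "en"], ["Japon", "fr"]]}
# which this loader maps to {"en:Japan": 10, "fr:Japon": 10}; special tokens
# like "[MASK]" keep their bare name as the key.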
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
lowerCamelCase : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
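# Reentrancy sketch (illustrative; assumes the platform alias defined at the
# bottom of this module is exposed as `FileLock`, as in the public API):
# acquisition is reference-counted, so the OS-level lock is only released
# when the outermost holder exits.
#
#   lock = FileLock("resource.txt.lock")
#   with lock:               # counter 0 -> 1, OS lock taken
#       with lock:           # counter 1 -> 2, no second OS call
#           pass             # counter back to 1 on exit, still locked
#       assert lock.is_locked
#   assert not lock.is_locked  # counter 0, OS lock released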
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
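# Illustrative shapes (assuming batch_size == -1 and seq_length == -1 so the
# ONNX defaults of 2 samples and 8 tokens apply): the dummy batch is 2
# sequences of unk-tokens (8 plus however many special tokens the tokenizer
# adds), one dummy bounding box per sample, and 2 generated 40x40 RGB dummy
# images that the processor then converts into "pixel_values".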
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : str = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Dict = '''switch_transformers'''
_A : int = ['''past_key_values''']
_A : Optional[int] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : str , __a : List[str]=32128 , __a : Optional[int]=768 , __a : Union[str, Any]=64 , __a : Union[str, Any]=2048 , __a : Dict=64 , __a : Optional[Any]=12 , __a : str=3 , __a : Dict=12 , __a : List[str]=3 , __a : str=12 , __a : str=8 , __a : Any=False , __a : str=0.01 , __a : Any="float32" , __a : Any=False , __a : Dict=32 , __a : Dict=128 , __a : List[str]=0.1 , __a : Any=1E-6 , __a : Any=0.001 , __a : Tuple=0.001 , __a : Dict=1.0 , __a : List[str]="relu" , __a : int=True , __a : Optional[int]=False , __a : List[str]=True , __a : str=0 , __a : Optional[Any]=1 , **__a : int , ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = vocab_size
__lowercase : Tuple = d_model
__lowercase : int = d_kv
__lowercase : Tuple = d_ff
__lowercase : int = num_sparse_encoder_layers
__lowercase : int = num_layers
__lowercase : List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase : int = num_sparse_decoder_layers
        # This tells us after how many encoder layers we have to place a sparse layer.
if self.num_sparse_encoder_layers > 0:
__lowercase : Tuple = self.num_layers // self.num_sparse_encoder_layers
else:
__lowercase : List[Any] = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers we have to place a sparse layer.
if self.num_sparse_decoder_layers > 0:
__lowercase : List[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__lowercase : str = self.num_decoder_layers # HACK: this will create 0 sparse layers
__lowercase : List[Any] = num_heads
__lowercase : Any = num_experts
__lowercase : Union[str, Any] = expert_capacity
__lowercase : Optional[int] = router_bias
__lowercase : Tuple = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
__lowercase : str = router_dtype
__lowercase : List[str] = router_ignore_padding_tokens
__lowercase : List[Any] = relative_attention_num_buckets
__lowercase : Tuple = relative_attention_max_distance
__lowercase : str = dropout_rate
__lowercase : Tuple = layer_norm_epsilon
__lowercase : str = initializer_factor
__lowercase : Optional[Any] = feed_forward_proj
__lowercase : Optional[int] = use_cache
__lowercase : List[str] = add_router_probs
__lowercase : int = router_z_loss_coef
__lowercase : List[Any] = router_aux_loss_coef
__lowercase : int = self.feed_forward_proj.split("""-""" )
__lowercase : Dict = act_info[-1]
__lowercase : List[Any] = act_info[0] == """gated"""
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__lowercase : str = """gelu_new"""
super().__init__(
pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , )
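# Worked example (hand-computed from the sparse-step logic above): with
# num_layers == 12 and num_sparse_encoder_layers == 3, the encoder sparse step
# is 12 // 3 == 4, i.e. every fourth encoder block becomes a sparse
# (mixture-of-experts) layer; num_sparse_encoder_layers == 0 sets the step to
# num_layers so that no sparse layer is ever placed.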
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
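# Minimal usage sketch (illustrative; method names follow the public
# transformers API):
#
#   conv = Conversation("Hi there!")
#   conv.mark_processed()             # move pending text into past_user_inputs
#   conv.append_response("Hello!")
#   print(conv)                       # "user >> Hi there!" then "bot >> Hello!"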
@add_end_docstrings(
__a , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
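# Usage sketch for the pipeline above (an illustrative, hedged example; the
# checkpoint name "microsoft/DialoGPT-small" is an assumption, not required here):
#
#   from transformers import pipeline, Conversation
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("What is the best movie of all time?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])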
| 649
| 1
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000):
    """Return the numerator of the largest fraction strictly below
    numerator/denominator whose denominator does not exceed `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # Largest numerator with current_numerator/current_denominator <= numerator/denominator
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # Exact multiple would reproduce the bound itself; step strictly below it.
            current_numerator -= 1
        # Compare fractions via cross-multiplication to avoid floating point.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
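# Sanity check on a tiny search space (illustrative): among fractions n/d with
# d <= 8, the largest one strictly below 3/7 is 2/5, so the numerator is 2.
assert solution(numerator=3, denominator=7, limit=8) == 2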
| 649
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__a , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
__lowercase : int = MobileNetVaConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
check_hidden_states_output(__a , __a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = MobileNetVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__a )
__lowercase : str = self.default_image_processor
__lowercase : Tuple = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : str = model(**__a )
# verify the logits
__lowercase : Union[str, Any] = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : Dict = model.to(__a )
__lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[int] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __a )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=__a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1E-4 ) )
| 649
| 1
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """A board is complete when every square has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first backtracking step: try every legal knight move from `pos`."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move and backtrack
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
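# Example (an illustrative sketch): a 5x5 board admits an open knight's tour,
# so `open_knight_tour(5)` returns a board whose entries are the visit order 1..25:
#   board = open_knight_tour(5)
#   assert sorted(x for row in board for x in row) == list(range(1, 26))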
| 649
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that, by default, only renders the bar on the main process."""
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        # Disable the bar everywhere except on the local main process.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
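# Usage sketch (assumes a script launched with `accelerate launch`; with this
# signature the iterable is passed positionally after `main_process_only`):
#   for batch in tqdm(True, dataloader):   # bar on the main process only
#       ...
#   for batch in tqdm(False, dataloader):  # bar on every process
#       ...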
| 649
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = '''encoder-decoder'''
_A : Optional[Any] = True
def __init__( self : List[str] , **__a : Optional[Any] ) -> Any:
"""simple docstring"""
super().__init__(**__a )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__lowercase : List[Any] = kwargs.pop("""encoder""" )
__lowercase : List[str] = encoder_config.pop("""model_type""" )
__lowercase : Optional[Any] = kwargs.pop("""decoder""" )
__lowercase : Optional[Any] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__lowercase : int = AutoConfig.for_model(__a , **__a )
__lowercase : Dict = AutoConfig.for_model(__a , **__a )
__lowercase : Union[str, Any] = True
@classmethod
def lowerCAmelCase ( cls : Optional[int] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Tuple ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
__lowercase : Dict = True
__lowercase : Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__a )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = copy.deepcopy(self.__dict__ )
__lowercase : Optional[Any] = self.encoder.to_dict()
__lowercase : Optional[int] = self.decoder.to_dict()
__lowercase : Tuple = self.__class__.model_type
return output
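# Usage sketch of the upstream `EncoderDecoderConfig` API this class mirrors
# (checkpoint names are illustrative assumptions):
#
#   from transformers import AutoConfig, EncoderDecoderConfig
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)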
| 649
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements (classic house-robber DP)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either take `num` (so the previous element must be excluded),
        # or skip it and keep the better of the two running totals.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
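# Quick sanity checks (illustrative): for [1, 2, 3] the best pick is 1 + 3 = 4;
# for [1, 5, 3, 7, 2, 2, 6] it is 5 + 7 + 6 = 18.
assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18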
| 649
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = IFInpaintingSuperResolutionPipeline
_A : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
_A : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowerCAmelCase ( self : int , __a : Optional[int] , __a : Any=0 ) -> Optional[int]:
"""simple docstring"""
if str(__a ).startswith("""mps""" ):
__lowercase : Dict = torch.manual_seed(__a )
else:
__lowercase : Any = torch.Generator(device=__a ).manual_seed(__a )
__lowercase : Optional[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(__a ) ).to(__a )
__lowercase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__lowercase : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 649
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Tuple = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowerCamelCase : Any = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowerCamelCase : Optional[Any] = '''▁'''
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[Any] = VOCAB_FILES_NAMES
_A : int = PRETRAINED_VOCAB_FILES_MAP
_A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __a : str , __a : List[Any]=True , __a : List[Any]=True , __a : List[str]=False , __a : Any="[CLS]" , __a : Tuple="[SEP]" , __a : Tuple="<unk>" , __a : str="[SEP]" , __a : Tuple="<pad>" , __a : Tuple="[CLS]" , __a : Dict="[MASK]" , __a : Optional[Dict[str, Any]] = None , **__a : List[str] , ) -> None:
"""simple docstring"""
__lowercase : Dict = (
AddedToken(__a , lstrip=__a , rstrip=__a , normalized=__a )
if isinstance(__a , __a )
else mask_token
)
__lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
__lowercase : List[Any] = do_lower_case
__lowercase : str = remove_space
__lowercase : Optional[Any] = keep_accents
__lowercase : Any = vocab_file
__lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
@property
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.__dict__.copy()
__lowercase : Dict = None
return state
def __setstate__( self : Dict , __a : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase : List[str] = {}
__lowercase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : int , __a : Optional[int] ) -> int:
"""simple docstring"""
if self.remove_space:
__lowercase : Union[str, Any] = """ """.join(inputs.strip().split() )
else:
__lowercase : Any = inputs
__lowercase : Optional[int] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__lowercase : Optional[int] = unicodedata.normalize("""NFKD""" , __a )
__lowercase : Optional[Any] = """""".join([c for c in outputs if not unicodedata.combining(__a )] )
if self.do_lower_case:
__lowercase : str = outputs.lower()
return outputs
def lowerCAmelCase ( self : Tuple , __a : str ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.preprocess_text(__a )
__lowercase : Optional[Any] = self.sp_model.encode(__a , out_type=__a )
__lowercase : Optional[Any] = []
for piece in pieces:
if len(__a ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__lowercase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(__a , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase : Optional[int] = cur_pieces[1:]
else:
__lowercase : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__a )
else:
new_pieces.append(__a )
return new_pieces
def lowerCAmelCase ( self : Tuple , __a : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.PieceToId(__a )
def lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self.sp_model.IdToPiece(__a )
def lowerCAmelCase ( self : Union[str, Any] , __a : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = []
__lowercase : Dict = """"""
__lowercase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
__lowercase : Optional[Any] = True
__lowercase : List[str] = []
else:
current_sub_tokens.append(__a )
__lowercase : Optional[Any] = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def lowerCAmelCase ( self : str , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : Optional[int] = [self.sep_token_id]
__lowercase : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : int , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is not None:
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1]
def lowerCAmelCase ( self : Optional[int] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__lowercase : List[str] = [self.sep_token_id]
__lowercase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : Any , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase : Optional[Any] = os.path.join(
__a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , """wb""" ) as fi:
__lowercase : Tuple = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
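# Usage sketch of the upstream `AlbertTokenizer` API this class mirrors
# (the checkpoint name is an illustrative assumption):
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("Hello world!")["input_ids"]  # [CLS] ... [SEP] ids from spiece.model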
| 649
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=False ):
__lowercase : Any = """backbone.""" if is_semantic else """"""
__lowercase : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : List[Any]=False ):
for i in range(config.num_hidden_layers ):
__lowercase : Tuple = """backbone.""" if is_semantic else """"""
# queries, keys and values
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
__lowercase : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
__lowercase : List[str] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : Union[str, Any] = q_bias
__lowercase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__lowercase : int = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
__lowercase : str = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
__lowercase : List[str] = gamma_a
__lowercase : Optional[int] = gamma_a
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Tuple = dct.pop(lowerCAmelCase_ )
__lowercase : Tuple = val
def snake_case_ ( ):
__lowercase : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : Any = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=False ):
__lowercase : Dict = False if """rvlcdip""" in checkpoint_url else True
__lowercase : Tuple = BeitConfig(use_absolute_position_embeddings=lowerCAmelCase_ , use_mask_token=lowerCAmelCase_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
__lowercase : Union[str, Any] = 1024
__lowercase : Optional[int] = 4096
__lowercase : List[Any] = 24
__lowercase : Dict = 16
# labels
if "rvlcdip" in checkpoint_url:
__lowercase : Optional[int] = 16
__lowercase : Any = """huggingface/label-files"""
__lowercase : Union[str, Any] = """rvlcdip-id2label.json"""
__lowercase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
__lowercase : Union[str, Any] = create_rename_keys(lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , has_lm_head=lowerCAmelCase_ )
# load HuggingFace model
__lowercase : Dict = BeitForMaskedImageModeling(lowerCAmelCase_ ) if has_lm_head else BeitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
__lowercase : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ )
__lowercase : List[str] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" )
__lowercase : Optional[int] = encoding["""pixel_values"""]
__lowercase : str = model(lowerCAmelCase_ )
__lowercase : Tuple = outputs.logits
# verify logits
__lowercase : str = [1, 16] if """rvlcdip""" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(lowerCAmelCase_ ), "Shape of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
if has_lm_head:
__lowercase : Optional[Any] = """dit-base""" if """base""" in checkpoint_url else """dit-large"""
else:
__lowercase : Tuple = """dit-base-finetuned-rvlcdip""" if """dit-b""" in checkpoint_url else """dit-large-finetuned-rvlcdip"""
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase_ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase_ , lowerCAmelCase_ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase_ , )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
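# Example invocation (an illustrative sketch; the script name and output path are
# assumptions, while the checkpoint URL is this script's documented default):
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base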
| 649
| 1
|
import os
lowerCamelCase : Dict = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            # Subtractive pair such as IV or XC.
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Write `num` as a Roman numeral in minimal (subtractive) form."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total characters saved by rewriting each numeral in the file in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(f'''{solution() = }''')
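# Round-trip sanity check (illustrative): "MCCCCCCVI" (9 characters) parses to
# 1606, whose minimal form "MDCVI" has 5 characters, a saving of 4.
assert parse_roman_numerals("MCCCCCCVI") == 1606
assert generate_roman_numerals(1606) == "MDCVI"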
| 649
|
from torch import nn
class ClassificationHead(nn.Module):
    """Linear head mapping a hidden state of size `embed_size` to `class_size` logits."""
    def __init__(self, class_size: int, embed_size: int):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
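# Usage sketch (shapes are illustrative assumptions):
#   import torch
#   head = ClassificationHead(class_size=5, embed_size=768)
#   logits = head(torch.randn(2, 768))  # -> tensor of shape (2, 5)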
| 649
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , __a : List[Any] , __a : List[str]=13 , __a : Union[str, Any]=7 , __a : Optional[int]=True , __a : str=True , __a : Dict=True , __a : Dict=True , __a : List[Any]=99 , __a : Optional[int]=32 , __a : Tuple=5 , __a : Any=4 , __a : List[str]=37 , __a : List[str]="gelu" , __a : int=0.1 , __a : int=0.1 , __a : Any=512 , __a : List[str]=16 , __a : Tuple=2 , __a : Dict=0.02 , __a : List[str]=4 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Optional[int] = batch_size
__lowercase : Optional[Any] = seq_length
__lowercase : Any = is_training
__lowercase : Optional[Any] = use_attention_mask
__lowercase : List[str] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : int = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : Any = num_attention_heads
__lowercase : List[str] = intermediate_size
__lowercase : Optional[Any] = hidden_act
__lowercase : Dict = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : Tuple = type_vocab_size
__lowercase : Optional[int] = type_sequence_label_size
__lowercase : List[str] = initializer_range
__lowercase : Optional[Any] = num_choices
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_attention_mask:
__lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Any = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : Dict = config_and_inputs
__lowercase : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[Any] = config_and_inputs
__lowercase : Union[str, Any] = True
__lowercase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = True
_A : Optional[int] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : str = FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase : Optional[int] = model_class_name.from_pretrained("""roberta-base""" , from_pt=__a )
__lowercase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__a )
| 649
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Compute ROUGE for line-aligned prediction/target files; optionally save as JSON."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
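# Example invocation via `fire` (an illustrative sketch; file names are assumptions):
#   python rouge_cli.py predictions.txt targets.txt --save_path rouge_scores.json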
| 649
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Optional[Any] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
        download_parser.set_defaults(func=download_command_factory)
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
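# Example invocation (an illustrative sketch; the model name is an assumption):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force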
| 649
| 0
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCamelCase : Tuple = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase ( Trainer ):
'''simple docstring'''
def __init__( self : str , *__a : Tuple , __a : List[Any]=None , __a : Optional[int]=None , __a : List[str]=None , **__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase : Optional[int] = eval_examples
__lowercase : List[Any] = post_process_function
__lowercase : int = quant_trainer_args
__lowercase : Any = 128 # default number of calibration samples
def lowerCAmelCase ( self : int , __a : Dict=None ) -> Optional[int]:
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires a calib_dataset.""" )
__lowercase : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset
__lowercase : Optional[int] = self._remove_unused_columns(lowerCAmelCase__ , description="""Calibration""" )
return DataLoader(
lowerCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCAmelCase__ , )
    def calibrate ( self , calib_dataset=None ):
        """simple docstring"""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info("""***** Running calibration *****""" )
        logger.info(F" Num examples = {self.calib_num}" )
        logger.info(F" Batch size = {calib_dataloader.batch_size}" )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=True )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
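    # Post-training-quantization flow implemented by calibrate(): observers are
    # enabled on the quantizers, at most self.calib_num samples are pushed
    # through the frozen model to collect activation ranges, and
    # finish_calibration() bakes those ranges into the quantizers.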
    def evaluate ( self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix : str = "eval" ):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"{metric_key_prefix}_" ):
                    metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict ( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix : str = "test" ):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , """predict""" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"{metric_key_prefix}_" ):
                metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
    def save_onnx ( self , output_dir="./" ):
        """simple docstring"""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        batch = next(iter(eval_dataloader ) )
        # saving device - to make it consistent
        device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
        # convert to tuple
        input_tuple = tuple(v.to(device ) for k, v in batch.items() )
        logger.info("""Converting model to be onnx compatible""" )
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device )
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model , """module""" ) else model
        quant_trainer.configure_model(model_to_save , self.quant_trainer_args )
        output_model_file = os.path.join(output_dir , """model.onnx""" )
        logger.info(F"exporting model to {output_model_file}" )
        axes = {0: """batch_size""", 1: """seq_len"""}
        torch.onnx.export(
            model_to_save , input_tuple , output_model_file , export_params=True , opset_version=13 , do_constant_folding=True , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
                """input_ids""": axes,
                """attention_mask""": axes,
                """token_type_ids""": axes,
                """output_start_logits""": axes,
                """output_end_logits""": axes,
            } , verbose=True , )
        logger.info("""onnx export finished""" )
| 701
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester :
    '''simple docstring'''

    def __init__( self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ):
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config ( self ):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict ( self , config ):
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone ( self , config , inputs_dict ):
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp ( self ) -> None:
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config ( self ) -> None:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_save_load_strict ( self ) -> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["""missing_keys"""] , [] )
    def test_encoder_decoder_model_standalone ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def test_resize_tokens_embeddings ( self ) -> None:
        """simple docstring"""
        pass
    def test_model_main_input_name ( self ) -> None:
        """simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature ( self ) -> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs ( self ) -> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , """seq_length""" , None )
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_len )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_len )
        d_model = getattr(self.model_tester , """d_model""" , None )
        num_attention_heads = getattr(self.model_tester , """num_attention_heads""" , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions ( self ) -> None:
        """simple docstring"""
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch ( filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head ( self ) -> None:
        """simple docstring"""
        model = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head ( self ) -> None:
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation ( self ) -> None:
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
| 649
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
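# Note: the _LazyModule registered above means `from transformers import MT5Model`
# resolves lazily -- the heavy modeling file is only imported on first attribute
# access, while the eagerly defined MT5Tokenizer aliases stay available.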
| 702
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ) -> None:
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps ( self , num_inference_steps , device : Union[str, torch.device] = None ) -> None:
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred ( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self ) -> int:
"""simple docstring"""
return self.config.num_train_timesteps
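# Reading aid (not part of the original file): step_pred() above is one
# Euler-Maruyama step of the reverse-time VP-SDE,
#   x_{t+dt} = x_t + [-0.5 * beta(t) * x_t - beta(t) * score(x_t, t)] * dt
#             + sqrt(beta(t)) * sqrt(|dt|) * z,   z ~ N(0, I),
# where dt < 0 because sampling runs backwards from t = 1 down to sampling_eps.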
| 649
| 0
|
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={'''User-Agent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
    soup = BeautifulSoup(res.text, '''html.parser''')
    links = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
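# Note: '''.eZt8xd''' is an obfuscated CSS class taken from Google's result
# markup at the time this script was written; Google rotates such class names,
# so the selector may need updating before the script yields any links.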
| 703
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp ( self ) -> None:
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer ( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer ( self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ):
        """simple docstring"""
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer ( self ) -> None:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing ( self ) -> None:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=True ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=True ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
    @slow
    def test_sequence_builders ( self ) -> None:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding ( self ) -> None:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        sequence = """Encode this sequence."""
        space_encoding = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = """Encode <mask> sequence"""
        sequence_nospace = """Encode <mask>sequence"""
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs ( self ) -> None:
        """simple docstring"""
        pass
    def test_embeded_special_tokens ( self ) -> None:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args ( self ) -> None:
        """simple docstring"""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , add_prefix_space )
            self.assertEqual(post_processor_state["""trim_offsets"""] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space ( self ) -> None:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 649
| 0
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob ( text : str ):
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"{round(-1 * my_fir_sum ):.1f}" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"{round(-1 * my_sec_sum ):.1f}" )
    # print the difference between them
    print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def analyze_text ( text : str ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main ():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
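# Worked example (hypothetical input): running calculate_prob on the commented
# sample text above prints three numbers -- the first-order entropy H(X) of
# single characters, the entropy H(X_n, X_{n+1}) of character pairs, and their
# difference, which estimates the conditional entropy H(X_{n+1} | X_n).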
| 704
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp ( self ) -> None:
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
    def test_config ( self ) -> None:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification ( self ) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_saved_model_creation_extended ( self ) -> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , """use_cache""" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , """saved_model""" , """1""" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["""encoder_hidden_states"""]
                    output_attentions = outputs["""encoder_attentions"""]
                else:
                    output_hidden_states = outputs["""hidden_states"""]
                    output_attentions = outputs["""attentions"""]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
    @slow
    def test_model_from_pretrained ( self ) -> None:
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(model )
    def test_attention_outputs ( self ) -> None:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , """key_length""" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm ( self ) -> None:
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 649
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
model_type = "dpr"
def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.projection_dim = projection_dim
self.position_embedding_type = position_embedding_type
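# Usage sketch: instantiate the config above with its BERT-style defaults and
# override only the projection head (upstream this class is exported as DPRConfig).
dpr_config = lowerCAmelCase(projection_dim=128)
assert dpr_config.hidden_size == 768 and dpr_config.projection_dim == 128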
| 705
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( BeitImageProcessor ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
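# Migration sketch implied by the deprecation warning above: construct the image
# processor directly (the checkpoint name is illustrative).
def _beit_migration_sketch():
    image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    return image_processor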
| 649
| 0
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase : Dict = logging.get_logger(__name__)
class lowerCAmelCase ( DonutImageProcessor ):
'''simple docstring'''
def __init__( self : List[str] , *__a : Optional[int] , **__a : str ) -> Tuple:
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 706
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
kwargs = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(ValueError , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , shape=(2, 10, 16) , seed=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(seed )
return np.random.rand(*shape )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
logits = self._get_dummy_logits(shape=(10, 16) , seed=13 )
decoded_processor = processor.decode(logits )
decoded_decoder = decoder.decode_beams(logits )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , pool_context ) -> List[Any]:
"""simple docstring"""
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
decoder = self.get_decoder()
processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
logits = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
decoded_processor = processor.batch_decode(logits )
else:
with get_context(pool_context ).Pool() as pool:
decoded_processor = processor.batch_decode(logits , pool )
logits_list = list(logits )
with get_context("fork" ).Pool() as p:
decoded_beams = decoder.decode_beams_batch(p , logits_list )
texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(texts_decoder , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder downloaded from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def get_from_offsets(offsets , key ) -> Dict:
"""simple docstring"""
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
logits = self._get_dummy_logits()[0]
outputs = processor.decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
logits = self._get_dummy_logits()
outputs = processor.batch_decode(logits , output_word_offsets=True )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(outputs , WavaVecaDecoderWithLMOutput ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
ds = load_dataset("common_voice" , "en" , split="train" , streaming=True )
ds = ds.cast_column("audio" , datasets.Audio(sampling_rate=16000 ) )
ds_iter = iter(ds )
sample = next(ds_iter )
processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
input_values = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
logits = model(input_values ).logits.cpu().numpy()
output = processor.decode(logits[0] , output_word_offsets=True )
time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
word_offsets = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(word_offsets , "word" ) ) , EXPECTED_TEXT )
self.assertEqual(" ".join(self.get_from_offsets(word_offsets , "word" ) ) , output.text )
# output times
start_times = torch.tensor(self.get_from_offsets(word_offsets , "start_time" ) )
end_times = torch.tensor(self.get_from_offsets(word_offsets , "end_time" ) )
# fmt: off
expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(start_times , expected_start_tensor , atol=0.01 ) )
self.assertTrue(torch.allclose(end_times , expected_end_tensor , atol=0.01 ) )
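# Standalone sketch of the multiprocessing pattern exercised above: the Pool must
# be created *after* the processor so the decoder's LM is inherited by the worker
# processes (fetches a small test checkpoint from the Hub when run).
def _pool_decode_sketch():
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(2, 10, 16)  # (batch, time, vocab) -- illustrative shape
    with get_context("fork").Pool() as pool:
        return processor.batch_decode(logits, pool).text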
| 649
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
model_type = "trajectory_transformer"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ) -> None:
"""simple docstring"""
self.vocab_size = vocab_size
self.action_weight = action_weight
self.reward_weight = reward_weight
self.value_weight = value_weight
self.max_position_embeddings = max_position_embeddings
self.block_size = block_size
self.action_dim = action_dim
self.observation_dim = observation_dim
self.transition_dim = transition_dim
self.learning_rate = learning_rate
self.n_layer = n_layer
self.n_head = n_head
self.n_embd = n_embd
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.resid_pdrop = resid_pdrop
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.kaiming_initializer_range = kaiming_initializer_range
self.use_cache = use_cache
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
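# Behavior sketch for the attribute_map above: PretrainedConfig aliases reads and
# writes of the canonical names onto the native fields, so hidden_size tracks
# n_embd (likewise num_attention_heads/n_head and num_hidden_layers/n_layer).
trajectory_config = lowerCAmelCase(n_embd=256)
assert trajectory_config.hidden_size == 256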
| 707
|
def and_gate(input_1: int , input_2: int ) -> int:
return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 649
| 0
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase : Tuple = 16
lowerCamelCase : List[str] = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
datasets = load_dataset("glue" , "mrpc" )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
eval_dataloader = DataLoader(
tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
config["num_epochs"] = 2
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config["lr"]
num_epochs = int(config["num_epochs"] )
seed = int(config["seed"] )
batch_size = int(config["batch_size"] )
metric = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=batch_size )
def inner_training_loop(batch_size ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(seed )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device )
# Instantiate optimizer
optimizer = AdamW(params=model.parameters() , lr=lr )
train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
for epoch in range(num_epochs ):
model.train()
for step, batch in enumerate(train_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
outputs = model(**batch )
loss = outputs.loss
accelerator.backward(loss )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
outputs = model(**batch )
predictions = outputs.logits.argmax(dim=-1 )
predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=predictions , references=references , )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , eval_metric )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. "
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config , args )
if __name__ == "__main__":
main()
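# Isolated sketch of the decorator that powers the example above: on an
# out-of-memory error it halves the batch size and retries the wrapped function
# until it fits; numbers are illustrative.
def _find_executable_batch_size_sketch():
    @find_executable_batch_size(starting_batch_size=128)
    def demo_loop(batch_size):
        # a real body would build dataloaders for `batch_size` and train;
        # an OOM raised here would trigger a retry at 64, 32, ...
        return batch_size
    return demo_loop()  # called with no arguments; the decorator supplies batch_size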
| 708
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def get_user_input():
compute_environment = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
config = get_sagemaker_input()
else:
config = get_cluster_input()
return config
def config_command_parser(subparsers=None ):
if subparsers is not None:
parser = subparsers.add_parser("config" , description=description )
else:
parser = argparse.ArgumentParser("Accelerate config command" , description=description )
parser.add_argument(
"--config_file" , default=None , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=config_command )
return parser
def config_command(args ):
config = get_user_input()
if args.config_file is not None:
config_file = args.config_file
else:
if not os.path.isdir(cache_dir ):
os.makedirs(cache_dir )
config_file = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(config_file )
else:
config.to_yaml_file(config_file )
print(f"accelerate configuration saved at {config_file}" )
def main():
parser = config_command_parser()
args = parser.parse_args()
config_command(args )
if __name__ == "__main__":
main()
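# Programmatic sketch of the entry points above (path illustrative); passing the
# resulting args to config_command would prompt interactively and write the file.
def _config_cli_sketch():
    parser = config_command_parser()
    return parser.parse_args(["--config_file", "/tmp/accelerate_config.yaml"])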
| 649
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCAmelCase ( ChunkPipeline ):
'''simple docstring'''
def __init__( self : Tuple , **__a : List[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__(**__a )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
def lowerCAmelCase ( self : Optional[Any] , **__a : int ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = {}
__lowercase : Tuple = {}
__lowercase : Union[str, Any] = {}
# preprocess args
if "points_per_batch" in kwargs:
__lowercase : str = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__lowercase : Optional[int] = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__lowercase : List[Any] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__lowercase : List[str] = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__lowercase : Optional[int] = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__lowercase : List[str] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__lowercase : str = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__lowercase : List[Any] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__lowercase : Tuple = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__lowercase : Tuple = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__lowercase : Any = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__lowercase : Tuple = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : int , image , *args , num_workers=None , batch_size=None , **kwargs ) -> List[str]:
"""simple docstring"""
return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : Optional[Any]=64 , __a : Any = 0 , __a : List[str] = 512 / 1500 , __a : List[Any] = 32 , __a : Optional[Any] = 1 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = load_image(_lowerCAmelCase )
__lowercase : Optional[Any] = self.image_processor.size["""longest_edge"""]
__lowercase , __lowercase , __lowercase , __lowercase : Tuple = self.image_processor.generate_crop_boxes(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase : Optional[int] = self.image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__lowercase : Union[str, Any] = self.get_inference_context()
with inference_context():
__lowercase : str = self._ensure_tensor_on_device(_lowerCAmelCase , device=self.device )
__lowercase : Optional[int] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__lowercase : Dict = image_embeddings
__lowercase : str = grid_points.shape[1]
__lowercase : Any = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
__lowercase : Optional[int] = grid_points[:, i : i + points_per_batch, :, :]
__lowercase : int = input_labels[:, i : i + points_per_batch]
__lowercase : Optional[int] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCAmelCase ( self : Any , __a : Tuple , __a : Optional[Any]=0.88 , __a : List[str]=0.95 , __a : str=0 , __a : Dict=1 , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = model_inputs.pop("""input_boxes""" )
__lowercase : Dict = model_inputs.pop("""is_last""" )
__lowercase : Any = model_inputs.pop("""original_sizes""" ).tolist()
__lowercase : Union[str, Any] = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__lowercase : Dict = self.model(**_lowerCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__lowercase : Dict = model_outputs["""pred_masks"""]
__lowercase : str = self.image_processor.post_process_masks(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , binarize=_lowerCAmelCase )
__lowercase : Any = model_outputs["""iou_scores"""]
__lowercase , __lowercase , __lowercase : Any = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCAmelCase ( self : List[Any] , __a : str , __a : Optional[Any]=False , __a : Union[str, Any]=False , __a : Dict=0.7 , ) -> Tuple:
"""simple docstring"""
__lowercase : Dict = []
__lowercase : Any = []
__lowercase : List[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__lowercase : Any = torch.cat(_lowerCAmelCase )
__lowercase : List[Any] = torch.cat(_lowerCAmelCase )
__lowercase , __lowercase , __lowercase , __lowercase : int = self.image_processor.post_process_for_mask_generation(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase : Optional[int] = defaultdict(_lowerCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_lowerCAmelCase )
__lowercase : int = {}
if output_rle_mask:
__lowercase : str = rle_mask
if output_bboxes_mask:
__lowercase : str = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 709
|
from __future__ import annotations
def all_construct(target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
word_bank = word_bank or []
# create a table
table_size: int = len(target ) + 1
table: list[list[list[str]]] = []
for _ in range(table_size ):
table.append([] )
# seed value
table[0] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(table_size ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(word )] == word:
new_combinations: list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to the table[i+len(word)]
table[i + len(word )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(target )]:
combination.reverse()
return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
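# Worked micro-example of the table DP above: for target "ab" and word bank
# ["a", "b", "ab"], table[1] becomes [["a"]] and table[2] collects [["ab"]] and
# [["b", "a"]]; the final reversal yields the readable left-to-right order.
assert sorted(all_construct("ab", ["a", "b", "ab"])) == [["a", "b"], ["ab"]]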
| 649
| 0
|
import os
def snake_case_ ( lowerCAmelCase_ : int = "input.txt" ):
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as input_file:
__lowercase : str = [
[int(lowerCamelCase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
__lowercase : Dict = len(lowerCamelCase__ )
__lowercase : Optional[int] = len(matrix[0] )
__lowercase : int = [[-1 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )]
for i in range(lowerCamelCase__ ):
__lowercase : List[str] = matrix[i][0]
for j in range(1 , lowerCamelCase__ ):
for i in range(lowerCamelCase__ ):
__lowercase : Optional[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCamelCase__ ):
__lowercase : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__lowercase : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
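# In-memory check of the same three-pass relaxation (no file I/O); the cheapest
# left-to-right path in this 3x3 grid is 201 -> 96 -> 342 = 639.
def _min_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [row[:] for row in matrix]  # column 0 is already correct
    for j in range(1, cols):
        for i in range(rows):  # arriving from the left
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # relax downward moves
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax upward moves
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)

assert _min_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]) == 639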
| 710
|
def or_gate(input_1: int , input_2: int ) -> int:
return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 649
| 0
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = DebertaTokenizer
test_rust_tokenizer = True
rust_tokenizer_class = DebertaTokenizerFast
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "[UNK]"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
def lowerCAmelCase ( self : str , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def lowerCAmelCase ( self : str , __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = "lower newer"
__lowercase : Any = "lower newer"
return input_text, output_text
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
__lowercase : Any = self.get_tokenizer()
__lowercase : Any = "lower newer"
__lowercase : Optional[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
__lowercase : List[Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
__lowercase : Dict = tokens + [tokenizer.unk_token]
__lowercase : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
tokenizer = self.get_tokenizer()
tokd = tokenizer("Hello" , "World" )
expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , expected_token_type_ids )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowercase : int = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
__lowercase : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
__lowercase : Any = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__A , add_prefix_space=__A )
__lowercase : Optional[int] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__A , add_prefix_space=__A )
__lowercase : str = tokenizer.build_inputs_with_special_tokens(__A )
__lowercase : Tuple = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowercase : List[str] = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
__lowercase : str = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
__lowercase : Dict = tokenizer(__A , padding=__A )
__lowercase : Optional[int] = [tokenizer.decode(__A , skip_special_tokens=__A ) for seq in encoding["input_ids"]]
# fmt: off
__lowercase : Tuple = {
"input_ids": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowercase : str = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , __A )
for expected, decoded in zip(__A , __A ):
self.assertEqual(__A , __A )
| 711
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : int = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
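# Net-effect sketch: with sys.modules[__name__] swapped for the _LazyModule,
# `import transformers.models.funnel` stays cheap, and the heavy frameworks load
# only when a symbol is first touched, e.g.:
#
#     from transformers.models.funnel import FunnelConfig  # triggers the real import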
| 649
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = ShapEImgaImgPipeline
params = ["""image"""]
batch_params = ["""image"""]
required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
_A : Tuple = False
@property
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
return 8
@property
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowercase : Optional[int] = CLIPVisionModel(_a )
return model
@property
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
__lowercase : Any = CLIPImageProcessor(
crop_size=224 , do_center_crop=_a , do_normalize=_a , do_resize=_a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : List[str] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowercase : Optional[Any] = PriorTransformer(**_a )
return model
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowercase : List[str] = ShapERenderer(**_a )
return model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Any = self.dummy_prior
__lowercase : Union[str, Any] = self.dummy_image_encoder
__lowercase : List[str] = self.dummy_image_processor
__lowercase : Dict = self.dummy_renderer
__lowercase : Optional[int] = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
__lowercase : List[str] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase ( self : Dict , __a : int , __a : Optional[Any]=0 ) -> Union[str, Any]:
"""simple docstring"""
        __lowercase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            __lowercase : Optional[int] = torch.manual_seed(seed )
        else:
            __lowercase : Tuple = torch.Generator(device=device ).manual_seed(seed )
__lowercase : Optional[int] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : List[Any] = """cpu"""
__lowercase : Optional[int] = self.get_dummy_components()
        __lowercase : int = self.pipeline_class(**components )
        __lowercase : Dict = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase : List[str] = pipe(**self.get_dummy_inputs(device ) )
__lowercase : Optional[int] = output.images[0]
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowercase : Optional[Any] = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = torch_device == """cpu"""
__lowercase : Dict = True
self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : Dict = self.get_dummy_components()
        __lowercase : Union[str, Any] = self.pipeline_class(**components )
        __lowercase : Optional[Any] = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase : Optional[Any] = 1
        __lowercase : Optional[Any] = 2
        __lowercase : Any = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
__lowercase : Dict = batch_size * [inputs[key]]
        __lowercase : Optional[int] = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowercase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowercase : Optional[int] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        __lowercase : Any = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        __lowercase : Any = torch.Generator(device=torch_device ).manual_seed(0 )
        __lowercase : Dict = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
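# Usage sketch (hedged): outside the test harness, the slow path above maps to
# the real upstream API roughly like this; treat the device handling as
# illustrative rather than prescriptive.
#
# from diffusers import ShapEImg2ImgPipeline
# from diffusers.utils import load_image
# pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
# image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png")
# frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]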
| 712
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = '''3.0.12'''
_logger = None
def snake_case_ ( ):
global _logger
    _logger = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( TimeoutError ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        self.release(force=True )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        __lowercase : List[str] = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
            __lowercase : Union[str, Any] = os.open(self._lock_file , open_mode )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
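# Usage sketch (hedged; written against the upstream `filelock` API that this
# module mirrors, since the placeholder method names above shadow one another):
#
# lock = FileLock("demo.txt.lock", timeout=1)
# with lock:  # acquire() on __enter__, release() on __exit__
#     with open("demo.txt", "a") as f:
#         f.write("exclusive write\n")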
| 649
| 0
|
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase : Any = get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
_A : Tuple = 'dummy_data'
_A : Dict = 'datasets'
_A : int = False
def __init__( self : List[str] , __a : str , __a : str , __a : Union[Version, str] , __a : Optional[str] = None , __a : bool = False , __a : bool = True , __a : Optional[List[Callable]] = None , ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = 0
__lowercase : str = dataset_name
__lowercase : Optional[Any] = cache_dir
__lowercase : Union[str, Any] = use_local_dummy_data
__lowercase : Tuple = config
# download_callbacks take a single url as input
__lowercase : List[Any] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase : List[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase : str = str(__a )
# to be downloaded
__lowercase : int = None
__lowercase : str = None
@property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase : Union[str, Any] = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase : int = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
@property
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
if self._bucket_url is None:
__lowercase : Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , *__a : Any ) -> int:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase : str = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__a , __a ):
return self.create_dummy_data_dict(__a , __a )
elif isinstance(__a , (list, tuple) ):
return self.create_dummy_data_list(__a , __a )
else:
return self.create_dummy_data_single(__a , __a )
def lowerCAmelCase ( self : Any , __a : str , *__a : Any ) -> Optional[Any]:
"""simple docstring"""
return self.download_and_extract(__a )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(__a )
def lowerCAmelCase ( self : Dict , __a : Union[str, Any] , *__a : Optional[int] , **__a : Any ) -> Optional[int]:
"""simple docstring"""
return path
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
return {}
def lowerCAmelCase ( self : str , __a : Optional[int] , __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
                if isinstance(single_urls , list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    __lowercase : Tuple = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls , list ):
                __lowercase : Optional[int] = [os.path.join(__a , urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                __lowercase : List[Any] = single_urls
                __lowercase : Any = os.path.join(__a , urllib.parse.quote_plus(Path(single_url ).name ) )
__lowercase : Any = value
# make sure that values are unique
        if all(isinstance(i , str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase : Optional[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase ( self : Optional[Any] , __a : Any , __a : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : Union[str, Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        __lowercase : str = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , url ) ) for url in data_url )
__lowercase : Union[str, Any] = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase : Optional[int] = [data_url[0]] * len(__a )
for single_url in data_url:
for download_callback in self.download_callbacks:
                download_callback(single_url )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase : str = os.path.join(__a , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(__a )
return dummy_data_list
def lowerCAmelCase ( self : str , __a : Union[str, Any] , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(__a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase : Optional[int] = os.path.join(__a , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(__a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int , __a : Tuple ) -> Any:
"""simple docstring"""
def _iter_archive_members(__a : Union[str, Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase : List[Any] = Path(self.dummy_file ).parent
            __lowercase : Optional[Any] = path.relative_to(dummy_parent_path )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
__lowercase : int = Path(__a )
__lowercase : List[str] = _iter_archive_members(__a ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(__a ).as_posix(), file_path.open("""rb""" )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        if not isinstance(paths , list ):
__lowercase : List[str] = [paths]
for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((""".""", """__""") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((""".""", """__""") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((""".""", """__""") ):
                            continue
                        yield os.path.join(dirpath , filename )
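# Behavior sketch (illustrative): download_and_extract above mirrors the real
# DownloadManager API but resolves every URL into a path inside dummy_data.zip,
# dispatching on the type of its argument:
#   dict -> create_dummy_data_dict   (one dummy path per key)
#   list -> create_dummy_data_list   (one dummy path per url)
#   str  -> create_dummy_data_single (a single dummy path)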
| 713
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
_A : int = '''layoutlmv3'''
def __init__( self : Dict , __a : List[str]=50265 , __a : str=768 , __a : List[Any]=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : Optional[Any]="gelu" , __a : Optional[int]=0.1 , __a : List[Any]=0.1 , __a : Tuple=512 , __a : int=2 , __a : Any=0.02 , __a : Union[str, Any]=1E-5 , __a : List[str]=1 , __a : List[Any]=0 , __a : int=2 , __a : str=1024 , __a : str=128 , __a : List[Any]=128 , __a : Tuple=True , __a : Optional[int]=32 , __a : Any=128 , __a : List[Any]=64 , __a : Tuple=256 , __a : str=True , __a : int=True , __a : Optional[Any]=True , __a : Any=224 , __a : str=3 , __a : List[str]=16 , __a : Union[str, Any]=None , **__a : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
vocab_size=__a , hidden_size=__a , num_hidden_layers=__a , num_attention_heads=__a , intermediate_size=__a , hidden_act=__a , hidden_dropout_prob=__a , attention_probs_dropout_prob=__a , max_position_embeddings=__a , type_vocab_size=__a , initializer_range=__a , layer_norm_eps=__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
__lowercase : int = max_ad_position_embeddings
__lowercase : Any = coordinate_size
__lowercase : Optional[Any] = shape_size
__lowercase : str = has_relative_attention_bias
__lowercase : int = rel_pos_bins
__lowercase : Union[str, Any] = max_rel_pos
__lowercase : str = has_spatial_attention_bias
__lowercase : str = rel_ad_pos_bins
__lowercase : List[Any] = max_rel_ad_pos
__lowercase : Tuple = text_embed
__lowercase : int = visual_embed
__lowercase : Tuple = input_size
__lowercase : Dict = num_channels
__lowercase : str = patch_size
__lowercase : Optional[int] = classifier_dropout
class lowerCAmelCase ( OnnxConfig ):
'''simple docstring'''
_A : str = version.parse('''1.12''' )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : "ProcessorMixin" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , __a )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase : Tuple = processor.tokenizer.num_special_tokens_to_add(__a )
__lowercase : Tuple = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
__lowercase : Union[str, Any] = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase : Tuple = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase : Tuple = self._generate_dummy_images(__a , __a , __a , __a )
__lowercase : int = dict(
processor(
__a , text=__a , boxes=__a , return_tensors=__a , ) )
return inputs
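# Usage sketch (hedged; shown with the upstream class names, since the classes
# above carry placeholder names): generate_dummy_inputs is typically driven by
# a processor like this. Treat the import path and kwargs as assumptions.
#
# from transformers import LayoutLMv3Config, LayoutLMv3Processor
# from transformers.models.layoutlmv3.configuration_layoutlmv3 import LayoutLMv3OnnxConfig
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
# dummy_inputs = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)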
| 649
| 0
|
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Union[str, Any] , __a : Tuple , __a : bool = True , __a : bool = False ) -> List[Any]:
"""simple docstring"""
__lowercase : Tuple = scheduler
        __lowercase : Optional[int] = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
__lowercase : Optional[int] = split_batches
__lowercase : List[str] = step_with_optimizer
__lowercase : Any = GradientState()
def lowerCAmelCase ( self : int , *__a : str , **__a : int ) -> Optional[int]:
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowercase : str = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , """total_steps""" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.scheduler.get_last_lr()
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
return self.scheduler.state_dict()
def lowerCAmelCase ( self : str , __a : str ) -> Optional[Any]:
"""simple docstring"""
        self.scheduler.load_state_dict(state_dict )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.scheduler.get_lr()
def lowerCAmelCase ( self : Optional[Any] , *__a : Any , **__a : Any ) -> Optional[int]:
"""simple docstring"""
        return self.scheduler.print_lr(*args , **kwargs )
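# Usage sketch (hedged): this wrapper mirrors accelerate's AcceleratedScheduler,
# so with the upstream class name the call shape would be:
#
# import torch
# model = torch.nn.Linear(2, 2)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=True)
# wrapped.step()  # advances only when the optimizer actually stepped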
| 714
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : str = None , __a : uuid.UUID = None , __a : Any=None , __a : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if not conversation_id:
            __lowercase : Any = uuid.uuid4()
if past_user_inputs is None:
__lowercase : Dict = []
if generated_responses is None:
__lowercase : Dict = []
__lowercase : uuid.UUID = conversation_id
__lowercase : List[str] = past_user_inputs
__lowercase : List[str] = generated_responses
__lowercase : Optional[str] = text
def __eq__( self : Dict , __a : Dict ) -> Any:
"""simple docstring"""
if not isinstance(__a , __a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCAmelCase ( self : List[str] , __a : str , __a : bool = False ) -> Dict:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\"." )
__lowercase : Optional[int] = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
else:
__lowercase : Dict = text
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__lowercase : Dict = None
def lowerCAmelCase ( self : Optional[int] , __a : str ) -> List[Any]:
"""simple docstring"""
self.generated_responses.append(__a )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : int ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__lowercase : Optional[Any] = """user""" if is_user else """bot"""
output += F"{name} >> {text} \n"
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase ( Pipeline ):
'''simple docstring'''
def __init__( self : Any , *__a : int , **__a : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*__a , **__a )
if self.tokenizer.pad_token_id is None:
__lowercase : List[Any] = self.tokenizer.eos_token
def lowerCAmelCase ( self : Union[str, Any] , __a : int=None , __a : Tuple=None , __a : Any=None , **__a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = {}
__lowercase : Tuple = {}
__lowercase : List[str] = {}
if min_length_for_response is not None:
__lowercase : Dict = min_length_for_response
if minimum_tokens is not None:
__lowercase : Union[str, Any] = minimum_tokens
if "max_length" in generate_kwargs:
__lowercase : Union[str, Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowercase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , __a : Union[Conversation, List[Conversation]] , __a : Dict=0 , **__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = super().__call__(__a , num_workers=__a , **__a )
if isinstance(__a , __a ) and len(__a ) == 1:
return outputs[0]
return outputs
def lowerCAmelCase ( self : Union[str, Any] , __a : Conversation , __a : Tuple=32 ) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(__a , __a ):
            raise ValueError("""ConversationalPipeline expects a Conversation object as input""" )
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
__lowercase : List[Any] = self.tokenizer._build_conversation_input_ids(__a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowercase : Tuple = self._legacy_parse_and_tokenize(__a )
if self.framework == "pt":
__lowercase : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowercase : List[str] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCAmelCase ( self : Any , __a : Dict , __a : Any=10 , **__a : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
__lowercase : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
__lowercase : Any = max_length - minimum_tokens
__lowercase : int = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__lowercase : Dict = model_inputs["""attention_mask"""][:, -trim:]
__lowercase : Union[str, Any] = model_inputs.pop("""conversation""" )
__lowercase : Tuple = max_length
__lowercase : int = self.model.generate(**__a , **__a )
if self.model.config.is_encoder_decoder:
__lowercase : Optional[int] = 1
else:
__lowercase : str = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCAmelCase ( self : int , __a : Tuple , __a : List[Any]=True ) -> List[str]:
"""simple docstring"""
__lowercase : int = model_outputs["""output_ids"""]
__lowercase : Union[str, Any] = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
__lowercase : List[str] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__a )
return conversation
def lowerCAmelCase ( self : int , __a : Conversation ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = self.tokenizer.eos_token_id
__lowercase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__a , add_special_tokens=__a ) )
if len(__a ) > self.tokenizer.model_max_length:
__lowercase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
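# Usage sketch (hedged; upstream names rather than the placeholder names above):
#
# from transformers import Conversation, pipeline
# chatbot = pipeline("conversational")
# conversation = Conversation("Going to the movies tonight - any suggestions?")
# conversation = chatbot(conversation)
# conversation.add_user_input("Is it an action movie?")
# conversation = chatbot(conversation)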
| 649
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : List[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 10_24,
"gpt2-medium": 10_24,
"gpt2-large": 10_24,
"gpt2-xl": 10_24,
"distilgpt2": 10_24,
}
class lowerCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
_A : str = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Union[str, Any] = ['''input_ids''', '''attention_mask''']
_A : int = GPTaTokenizer
def __init__( self : Any , __a : Union[str, Any]=None , __a : Union[str, Any]=None , __a : int=None , __a : Tuple="<|endoftext|>" , __a : Optional[Any]="<|endoftext|>" , __a : List[str]="<|endoftext|>" , __a : Optional[int]=False , **__a : Dict , ) -> List[str]:
"""simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        __lowercase : Optional[Any] = kwargs.pop("""add_bos_token""" , False )
        __lowercase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            __lowercase : Optional[int] = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            __lowercase : Dict = add_prefix_space
            __lowercase : List[Any] = pre_tok_class(**pre_tok_state )
        __lowercase : Optional[Any] = add_prefix_space
__lowercase : Optional[Any] = add_prefix_space
def lowerCAmelCase ( self : List[Any] , *__a : Optional[int] , **__a : Optional[int] ) -> BatchEncoding:
"""simple docstring"""
        __lowercase : int = kwargs.get("""is_split_into_words""" , False )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
        return super()._batch_encode_plus(*args , **kwargs )
def lowerCAmelCase ( self : Dict , *__a : Dict , **__a : Dict ) -> BatchEncoding:
"""simple docstring"""
        __lowercase : List[Any] = kwargs.get("""is_split_into_words""" , False )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
        return super()._encode_plus(*args , **kwargs )
def lowerCAmelCase ( self : Any , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
        __lowercase : Any = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def lowerCAmelCase ( self : Optional[int] , __a : "Conversation" ) -> List[int]:
"""simple docstring"""
__lowercase : List[str] = []
for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            __lowercase : str = input_ids[-self.model_max_length :]
return input_ids
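# Usage sketch (hedged; upstream name GPT2TokenizerFast):
#
# from transformers import GPT2TokenizerFast
# tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
# encoding = tokenizer("Hello world")["input_ids"]
# assert tokenizer.decode(encoding) == "Hello world"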
| 715
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( ConfigTester ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Tuple , __a : str=13 , __a : Dict=3 , __a : List[Any]=32 , __a : Any=0.25 , __a : Any=8 , __a : Optional[int]=8 , __a : Optional[int]=6 , __a : Dict=32 , __a : Tuple=True , __a : List[Any]=True , __a : Optional[int]=True , __a : Tuple="relu6" , __a : Optional[Any]=1280 , __a : str=0.1 , __a : str=0.02 , __a : Optional[Any]=True , __a : Tuple=True , __a : Dict=10 , __a : Optional[Any]=None , ) -> Any:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Tuple = batch_size
__lowercase : Dict = num_channels
__lowercase : Optional[int] = image_size
__lowercase : int = depth_multiplier
__lowercase : str = depth_divisible_by
__lowercase : int = min_depth
__lowercase : Tuple = expand_ratio
__lowercase : Optional[int] = tf_padding
__lowercase : Dict = output_stride
__lowercase : Dict = first_layer_is_expansion
__lowercase : Optional[Any] = finegrained_output
__lowercase : str = hidden_act
__lowercase : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
__lowercase : Optional[int] = classifier_dropout_prob
__lowercase : int = use_labels
__lowercase : Optional[int] = is_training
__lowercase : Dict = num_labels
__lowercase : Tuple = initializer_range
__lowercase : Optional[Any] = scope
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : List[Any] = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = MobileNetVaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Tuple = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCAmelCase ( self : List[str] , __a : Optional[int] , __a : List[str] , __a : str , __a : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = self.num_labels
__lowercase : Dict = MobileNetVaForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __a : List[str] , __a : Tuple , __a : Any , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.num_labels
__lowercase : List[Any] = MobileNetVaForSemanticSegmentation(__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__lowercase : str = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = MobileNetVaModelTester(self )
        __lowercase : int = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            __lowercase : List[Any] = model_class(config )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : int = [*signature.parameters.keys()]
__lowercase : Any = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__a : List[Any] , __a : Tuple , __a : List[str] ):
__lowercase : Optional[Any] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Tuple = outputs.hidden_states
__lowercase : str = 16
self.assertEqual(len(__a ) , __a )
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Any = True
            check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase : Union[str, Any] = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase : Optional[int] = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def snake_case_ ( ):
__lowercase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
        __lowercase : Tuple = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(torch_device )
        __lowercase : str = self.default_image_processor
        __lowercase : Tuple = prepare_img()
        __lowercase : Tuple = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            __lowercase : str = model(**inputs )
        # verify the logits
        __lowercase : Union[str, Any] = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        __lowercase : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : int = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        __lowercase : Dict = model.to(torch_device )
        __lowercase : Tuple = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        __lowercase : List[str] = prepare_img()
        __lowercase : Optional[int] = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            __lowercase : Union[str, Any] = model(**inputs )
__lowercase : Any = outputs.logits
# verify the logits
__lowercase : Dict = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
__lowercase : str = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
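# Usage sketch (hedged; upstream names): the slow tests above boil down to the
# standard image-classification flow, with `image` being any PIL image.
#
# from transformers import MobileNetV2ImageProcessor, MobileNetV2ForImageClassification
# processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
# model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
# inputs = processor(images=image, return_tensors="pt")
# logits = model(**inputs).logits  # shape (1, 1001)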
| 649
| 0
|
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str ) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem ) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem , src: str , dst: str ):
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
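# Quick self-check (illustrative, not part of the original module):
# extract_path_from_uri simply strips the protocol prefix from a URI.
if __name__ == "__main__":
    assert extract_path_from_uri("s3://bucket/data") == "bucket/data"
    assert extract_path_from_uri("relative/path") == "relative/path"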
| 716
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
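# Usage sketch (illustrative): show a progress bar only on the main process;
# the positional form puts main_process_only first, mirroring the signature above.
#
# for batch in tqdm(True, range(10)):
#     ...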
| 649
| 0
|
import datasets
lowerCamelCase : int = '''\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
'''
lowerCamelCase : Optional[Any] = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
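# Worked example (illustrative, assuming numpy arrays): simple_accuracy averages
# the element-wise equality mask, e.g. preds=[0, 1, 1] vs labels=[0, 1, 0] -> 2/3.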
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self , predictions , references ):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions , references )}
| 717
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
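# Worked example (illustrative, not in the original): for [3, 7, 4, 6, 5] the best
# non-adjacent picks are 7 and 6, so maximum_non_adjacent_sum returns 13 (compare
# 3 + 4 + 5 = 12, the best alternative).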
if __name__ == "__main__":
import doctest
doctest.testmod()
| 649
| 0
|
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep track of the calculated value
    # since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
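# Worked example (illustrative, not in the original): binomial_coefficient(5, 2) runs
# two iterations, result = 1 * 5 // 1 = 5 and then 5 * 4 // 2 = 10, so C(5, 2) = 10.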
def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
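# Worked example (illustrative, not in the original): for node_count = 3,
# catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5 binary search trees, and
# binary_tree_count(3) = 5 * 3! = 30 distinct binary trees on 3 labelled nodes.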
if __name__ == "__main__":
lowerCamelCase : List[str] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 718
|
lowerCamelCase : List[str] = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 649
| 0
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        """simple docstring"""
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        """simple docstring"""
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_image(self):
        """simple docstring"""
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 719
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", """beit.embeddings.cls_token"""),
(F"{prefix}patch_embed.proj.weight", """beit.embeddings.patch_embeddings.projection.weight"""),
(F"{prefix}patch_embed.proj.bias", """beit.embeddings.patch_embeddings.projection.bias"""),
(F"{prefix}pos_embed", """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
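# Example of the resulting mapping (illustrative, not in the original): for i = 0 with an
# empty prefix, the first entry is ("blocks.0.norm1.weight",
# "beit.encoder.layer.0.layernorm_before.weight") -- original DiT key on the left,
# HuggingFace BEiT key on the right.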
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias")
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[F"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(F"{prefix}blocks.{i}.gamma_2")
        state_dict[F"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[F"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 649
| 0
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowercase : List[str] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : int = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
__lowercase : str = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
__lowercase : Union[str, Any] = tempfile.mkdtemp()
__lowercase : List[Any] = tokenizer_r.save_pretrained(snake_case_ )
__lowercase : Union[str, Any] = tokenizer_p.save_pretrained(snake_case_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__lowercase : List[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(snake_case_ , snake_case_ )
# Checks everything loads correctly in the same way
__lowercase : Dict = tokenizer_r.from_pretrained(snake_case_ )
__lowercase : Union[str, Any] = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=True
__lowercase : Union[str, Any] = tempfile.mkdtemp()
__lowercase : str = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
__lowercase : Optional[Any] = tokenizer_p.save_pretrained(snake_case_ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case_ , snake_case_ )
# Checks everything loads correctly in the same way
__lowercase : int = tokenizer_r.from_pretrained(snake_case_ )
__lowercase : Any = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
shutil.rmtree(snake_case_ )
# Save tokenizer rust, legacy_format=False
__lowercase : List[Any] = tempfile.mkdtemp()
__lowercase : str = tokenizer_r.save_pretrained(snake_case_ , legacy_format=snake_case_ )
__lowercase : int = tokenizer_p.save_pretrained(snake_case_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowercase : str = tokenizer_r.from_pretrained(snake_case_ )
__lowercase : Tuple = tokenizer_p.from_pretrained(snake_case_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case_ , snake_case_ ) )
shutil.rmtree(snake_case_ )
@cached_property
    def big_tokenizer(self):
        """simple docstring"""
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        """simple docstring"""
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
179459,
124850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
10114,
711,
152,
20,
6,
5,
22376,
642,
1221,
15190,
34153,
450,
5608,
959,
1119,
57702,
136,
186,
47,
1098,
29367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
50901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[int] = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 720
|
from torch import nn
class ClassificationHead(nn.Module):
    '''simple docstring'''
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
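# Minimal usage sketch (illustrative, not part of the original file; assumes `import torch`
# and that the sizes are made up):
#
#     head = ClassificationHead(class_size=5, embed_size=768)
#     logits = head(torch.randn(2, 768))  # -> tensor of shape (2, 5)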
| 649
| 0
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowerCamelCase : List[str] = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
lowerCamelCase : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
lowerCamelCase : str = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = F"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
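# Worked example (illustrative, not in the original): for one question with two answer
# candidates, ids_preds = [{"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
# {"idx": {"paragraph": 0, "question": 0}, "prediction": 1}] and labels = [0, 1], every
# answer is predicted correctly, so exact_match, f1_m and f1_a all equal 1.0.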
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 721
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
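# Illustrative CLI usage via fire (not part of the original file; the file names are
# made up): `python rouge_cli.py preds.txt refs.txt --save_path=metrics.json`.
# Positional arguments map to pred_path / tgt_path; extra flags are forwarded to
# calculate_rouge through **kwargs.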
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 649
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "mobilenet_v1"
    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def lowerCAmelCase ( self : Optional[Any] ) -> float:
"""simple docstring"""
return 1E-4
| 700
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
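# Illustrative CLI usage (not part of the original file; the model name is just an
# example): `transformers-cli download --cache-dir /tmp/models bert-base-uncased`
# resolves here to DownloadCommand("bert-base-uncased", "/tmp/models", False, False),
# whose run() method then downloads the model and tokenizer.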
class DownloadCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 649
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : int = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class MraConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "mra"
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
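# Minimal usage sketch (illustrative, not in the original): MraConfig() reproduces the
# defaults of the uw-madison/mra-base-512-4 checkpoint, and individual fields can be
# overridden by keyword, e.g. MraConfig(hidden_size=512, num_hidden_layers=8).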
| 701
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
__lowercase : List[str] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """simple docstring"""
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_A : List[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_A : Any = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_A : Dict = False
_A : Tuple = False
_A : Optional[int] = False
_A : Tuple = False
_A : str = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
__lowercase : List[str] = AutoformerModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase , __lowercase : Tuple = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : Any = inspect.signature(getattr(__a , """forward""" ) )
# The main input is the name of the argument after `self`
__lowercase : Optional[int] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Dict = model_class(__a )
__lowercase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Any = [*signature.parameters.keys()]
__lowercase : int = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
__lowercase , __lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , """seq_length""" , __a )
__lowercase : Union[str, Any] = getattr(self.model_tester , """decoder_seq_length""" , __a )
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """d_model""" , __a )
__lowercase : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , __a )
__lowercase : Any = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowercase : Dict = True
__lowercase : List[str] = False
__lowercase : Optional[int] = True
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : int = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__lowercase : Dict = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowercase : Tuple = len(__a )
__lowercase : str = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowercase : List[Any] = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowercase : Optional[int] = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowercase : Tuple = True
__lowercase : Union[str, Any] = True
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Any = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowercase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def snake_case_ ( lowerCAmelCase_ : Optional[int]="train-batch.pt" ):
__lowercase : Dict = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCAmelCase_ , repo_type="""dataset""" )
__lowercase : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
return batch
@require_torch
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[Any] = prepare_batch()
with torch.no_grad():
__lowercase : Tuple = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowercase : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : List[str] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowercase : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowercase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__a )
__lowercase : Optional[int] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowercase : int = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowercase : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowercase : Optional[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__a )
__lowercase : Dict = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1E-1 ) )
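def _example_point_forecast():
    """Illustrative sketch, not part of the original test suite: turning the sampled
    trajectories returned by `generate` into a point forecast, mirroring the
    integration test above. Assumes Hub access; `snake_case_` is this module's
    (obfuscated) batch-loading helper."""
    model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" )
    batch = snake_case_("""val-batch.pt""" )
    with torch.no_grad():
        outputs = model.generate(
            static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
    # (batch, num_parallel_samples, prediction_length) -> (batch, prediction_length)
    return outputs.sequences.mean(dim=1 )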
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Any = '''cvt'''
def __init__( self : Optional[Any] , __a : Optional[int]=3 , __a : Optional[Any]=[7, 3, 3] , __a : List[Any]=[4, 2, 2] , __a : Union[str, Any]=[2, 1, 1] , __a : Dict=[64, 192, 384] , __a : str=[1, 3, 6] , __a : List[str]=[1, 2, 10] , __a : Optional[int]=[4.0, 4.0, 4.0] , __a : List[str]=[0.0, 0.0, 0.0] , __a : Any=[0.0, 0.0, 0.0] , __a : Dict=[0.0, 0.0, 0.1] , __a : Union[str, Any]=[True, True, True] , __a : int=[False, False, True] , __a : Optional[int]=["dw_bn", "dw_bn", "dw_bn"] , __a : Optional[int]=[3, 3, 3] , __a : Dict=[1, 1, 1] , __a : Tuple=[2, 2, 2] , __a : List[str]=[1, 1, 1] , __a : Optional[Any]=[1, 1, 1] , __a : Any=0.02 , __a : List[str]=1E-12 , **__a : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Any = num_channels
__lowercase : Union[str, Any] = patch_sizes
__lowercase : int = patch_stride
__lowercase : List[str] = patch_padding
__lowercase : List[Any] = embed_dim
__lowercase : Dict = num_heads
__lowercase : Optional[int] = depth
__lowercase : str = mlp_ratio
__lowercase : Union[str, Any] = attention_drop_rate
__lowercase : Optional[int] = drop_rate
__lowercase : Any = drop_path_rate
__lowercase : Optional[int] = qkv_bias
__lowercase : Union[str, Any] = cls_token
__lowercase : int = qkv_projection_method
__lowercase : List[Any] = kernel_qkv
__lowercase : Optional[int] = padding_kv
__lowercase : str = stride_kv
__lowercase : List[str] = padding_q
__lowercase : List[str] = stride_q
__lowercase : List[Any] = initializer_range
__lowercase : int = layer_norm_eps
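# Illustrative sketch, not part of the original module: the class above is the
# (obfuscated) CvtConfig, and its defaults describe the three-stage CvT-13 layout,
# i.e. embed_dim=[64, 192, 384], depth=[1, 2, 10] and num_heads=[1, 3, 6] give one
# embedding width, block count and head count per stage. Assuming the
# un-obfuscated names:
#
#   from transformers import CvtConfig, CvtModel
#   config = CvtConfig()
#   model = CvtModel(config)   # randomly initialised CvT-13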
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase ( __a , __a ):
'''simple docstring'''
_A : str = 1
@register_to_config
def __init__( self : Optional[int] , __a : Tuple=2000 , __a : List[str]=0.1 , __a : str=20 , __a : Optional[int]=1E-3 ) -> int:
"""simple docstring"""
__lowercase : Tuple = None
__lowercase : Union[str, Any] = None
__lowercase : int = None
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Union[str, torch.device] = None ) -> str:
"""simple docstring"""
__lowercase : List[str] = torch.linspace(1 , self.config.sampling_eps , __a , device=__a )
def lowerCAmelCase ( self : Tuple , __a : List[Any] , __a : Tuple , __a : int , __a : Optional[int]=None ) -> str:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
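        # VP-SDE marginals: x_t | x_0 ~ N(mean_coeff * x_0, std**2 * I), where
        # log(mean_coeff) = -t**2 * (beta_max - beta_min) / 4 - t * beta_min / 2 and
        # std = sqrt(1 - exp(2 * log_mean_coeff)); dividing by std below rescales the
        # model output into the score used by the reverse-time SDE.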
__lowercase : Dict = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__lowercase : int = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__lowercase : Union[str, Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__lowercase : Optional[Any] = std.unsqueeze(-1 )
__lowercase : List[Any] = -score / std
# compute
__lowercase : Dict = -1.0 / len(self.timesteps )
__lowercase : int = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__lowercase : List[Any] = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__lowercase : Union[str, Any] = beta_t.unsqueeze(-1 )
__lowercase : List[str] = -0.5 * beta_t * x
__lowercase : int = torch.sqrt(__a )
__lowercase : Union[str, Any] = drift - diffusion**2 * score
__lowercase : Optional[Any] = x + drift * dt
# add noise
__lowercase : List[str] = randn_tensor(x.shape , layout=x.layout , generator=__a , device=x.device , dtype=x.dtype )
__lowercase : str = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.config.num_train_timesteps
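# Illustrative sampling sketch, not part of the original module. The class above is
# the (obfuscated) ScoreSdeVpScheduler; under that assumption, Euler-Maruyama
# reverse-SDE sampling with a hypothetical `score_model` looks like:
#
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)                     # hypothetical score network
#       x, x_mean = scheduler.step_pred(score, t, x)  # one reverse diffusion step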
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class lowerCAmelCase ( UpperCamelCase_ ):
'''simple docstring'''
_A : Union[str, Any] = '''deberta-v2'''
def __init__( self : Dict , __a : Dict=128100 , __a : Any=1536 , __a : Union[str, Any]=24 , __a : Optional[int]=24 , __a : Any=6144 , __a : List[str]="gelu" , __a : List[str]=0.1 , __a : int=0.1 , __a : Dict=512 , __a : Optional[int]=0 , __a : Union[str, Any]=0.02 , __a : str=1E-7 , __a : Union[str, Any]=False , __a : str=-1 , __a : str=0 , __a : Optional[Any]=True , __a : Union[str, Any]=None , __a : Any=0 , __a : Any="gelu" , **__a : Optional[int] , ) -> Dict:
"""simple docstring"""
super().__init__(**__A )
__lowercase : Optional[int] = hidden_size
__lowercase : Union[str, Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Tuple = intermediate_size
__lowercase : Union[str, Any] = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Tuple = max_position_embeddings
__lowercase : List[Any] = type_vocab_size
__lowercase : int = initializer_range
__lowercase : List[Any] = relative_attention
__lowercase : Dict = max_relative_positions
__lowercase : Any = pad_token_id
__lowercase : Any = position_biased_input
# Backwards compatibility
if type(__A ) == str:
__lowercase : List[Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )]
__lowercase : List[str] = pos_att_type
__lowercase : Tuple = vocab_size
__lowercase : List[str] = layer_norm_eps
__lowercase : List[str] = kwargs.get("""pooler_hidden_size""" , __A )
__lowercase : Tuple = pooler_dropout
__lowercase : str = pooler_hidden_act
class lowerCAmelCase ( UpperCamelCase_ ):
'''simple docstring'''
@property
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowercase : List[Any] = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[Any] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , __a : "PreTrainedTokenizerBase" = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = super().generate_dummy_inputs(preprocessor=__A , framework=__A )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
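# Illustrative sketch, not part of the original module: the OnnxConfig above reports
# dynamic batch/sequence axes and drops `token_type_ids` from the dummy inputs
# whenever `type_vocab_size == 0` (the DeBERTa-v2 default). Assuming the
# un-obfuscated class name:
#
#   from transformers import AutoConfig, AutoTokenizer
#   config = AutoConfig.from_pretrained("microsoft/deberta-v2-xlarge")
#   onnx_config = DebertaV2OnnxConfig(config)
#   dummy_inputs = onnx_config.generate_dummy_inputs(
#       AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   )
#   sorted(dummy_inputs)  # ["attention_mask", "input_ids"] when type_vocab_size == 0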
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : str = LongformerTokenizer
_A : int = True
_A : Optional[int] = LongformerTokenizerFast
_A : int = True
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__lowercase : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__lowercase : Optional[int] = {"""unk_token""": """<unk>"""}
__lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__a ) )
def lowerCAmelCase ( self : Optional[int] , **__a : Optional[Any] ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Tuple , **__a : Tuple ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """lower newer"""
__lowercase : int = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Dict = """lower newer"""
__lowercase : Optional[Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowercase : str = tokenizer.tokenize(__a ) # , add_prefix_space=True)
self.assertListEqual(__a , __a )
__lowercase : int = tokens + [tokenizer.unk_token]
__lowercase : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
__lowercase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__a )
__lowercase : List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a )
__lowercase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__a )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Tuple = """Encode this sequence."""
__lowercase : Optional[Any] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__a , __a )
__lowercase : List[str] = tokenizer.encode(__a , add_special_tokens=__a , add_prefix_space=__a )
__lowercase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__a , __a )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowercase : str = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__a , __a )
# Testing spaces after special tokens
__lowercase : List[Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__a , lstrip=__a , rstrip=__a )} ) # mask token has a left space
__lowercase : Dict = tokenizer.convert_tokens_to_ids(__a )
__lowercase : List[str] = """Encode <mask> sequence"""
__lowercase : List[str] = """Encode <mask>sequence"""
__lowercase : Union[str, Any] = tokenizer.encode(__a )
__lowercase : Dict = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__a , __a )
__lowercase : int = tokenizer.encode(__a )
__lowercase : Union[str, Any] = encoded.index(__a )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__a , __a )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(__a , **__a )
__lowercase : Optional[Any] = """A, <mask> AllenNLP sentence."""
__lowercase : Union[str, Any] = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
__lowercase : Optional[Any] = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowercase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowercase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__a , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __a )
self.assertEqual(post_processor_state["""trim_offsets"""] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[str] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : int = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Any = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ) + 1, len(__a ) + 1 + len(__a )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : str = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : int = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__a ), len(__a ) + 1 + len(__a )) , )
__lowercase : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : str = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ) + 1, 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Dict = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
__lowercase : int = self.rust_tokenizer_class.from_pretrained(
__a , use_fast=__a , add_prefix_space=__a , trim_offsets=__a )
__lowercase : Tuple = tokenizer_r(__a , return_offsets_mapping=__a , add_special_tokens=__a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__a ), 1 + len(__a ) + 1 + len(__a )) , )
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase : List[str] = Mapping[str, np.ndarray]
lowerCamelCase : Any = Mapping[str, Any] # Is a nested dict.
lowerCamelCase : List[Any] = 0.01
@dataclasses.dataclass(frozen=lowercase__ )
class lowerCAmelCase :
'''simple docstring'''
_A : int = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_A : Dict = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_A : int = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_A : str = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_A : Optional[Any] = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_A : List[Any] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_A : List[str] = None
# Templates used to generate this protein (prediction-only)
_A : Optional[int] = None
# Chain corresponding to each parent
_A : Dict = None
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase : List[str] = r"(\[[A-Z]+\]\n)"
__lowercase : List[str] = [tag.strip() for tag in re.split(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0]
__lowercase : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
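    # Each [TAG] header is paired with the lines that follow it, e.g.
    # [PRIMARY] -> sequence, [TERTIARY] -> flattened x/y/z coordinates, [MASK] -> +/-.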
__lowercase : List[str] = ["N", "CA", "C"]
__lowercase : Optional[int] = None
__lowercase : Union[str, Any] = None
__lowercase : List[str] = None
for g in groups:
if "[PRIMARY]" == g[0]:
__lowercase : List[str] = g[1][0].strip()
for i in range(len(_lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
__lowercase : Dict = "X" # FIXME: strings are immutable
__lowercase : Tuple = np.array(
[residue_constants.restype_order.get(_lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__lowercase : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_lowerCamelCase , g[1][axis].split() ) ) )
__lowercase : List[Any] = np.array(_lowerCamelCase )
__lowercase : str = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
__lowercase : Tuple = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__lowercase : str = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
__lowercase : Optional[int] = np.zeros(
(
len(_lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
__lowercase : int = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowerCamelCase , atom_mask=_lowerCamelCase , aatype=_lowerCamelCase , residue_index=np.arange(len(_lowerCamelCase ) ) , b_factors=_lowerCamelCase , )
def snake_case_ ( lowerCAmelCase_ : Protein , lowerCAmelCase_ : int = 0 ):
__lowercase : List[str] = []
__lowercase : int = prot.remark
if remark is not None:
pdb_headers.append(F"REMARK {remark}" )
__lowercase : str = prot.parents
__lowercase : int = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__lowercase : Union[str, Any] = [p for i, p in zip(_lowerCamelCase , _lowerCamelCase ) if i == chain_id]
if parents is None or len(_lowerCamelCase ) == 0:
__lowercase : List[str] = ["N/A"]
pdb_headers.append(F"PARENT {' '.join(_lowerCamelCase )}" )
return pdb_headers
def snake_case_ ( lowerCAmelCase_ : Protein , lowerCAmelCase_ : str ):
__lowercase : List[str] = []
__lowercase : List[str] = pdb_str.split("""\n""" )
__lowercase : Optional[Any] = prot.remark
if remark is not None:
out_pdb_lines.append(F"REMARK {remark}" )
__lowercase : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__lowercase : Any = []
if prot.parents_chain_index is not None:
__lowercase : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowerCamelCase ) , [] )
parent_dict[str(_lowerCamelCase )].append(_lowerCamelCase )
__lowercase : List[str] = max([int(_lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__lowercase : Tuple = parent_dict.get(str(_lowerCamelCase ) , ["""N/A"""] )
parents_per_chain.append(_lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__lowercase : List[Any] = [["N/A"]]
def make_parent_line(lowerCAmelCase_ : Sequence[str] ) -> str:
return F"PARENT {' '.join(_lowerCamelCase )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__lowercase : List[Any] = 0
for i, l in enumerate(_lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowerCamelCase ):
__lowercase : Union[str, Any] = parents_per_chain[chain_counter]
else:
__lowercase : Dict = ["N/A"]
out_pdb_lines.append(make_parent_line(_lowerCamelCase ) )
return "\n".join(_lowerCamelCase )
def snake_case_ ( lowerCAmelCase_ : Protein ):
__lowercase : Union[str, Any] = residue_constants.restypes + ["X"]
def res_atoa(lowerCAmelCase_ : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
__lowercase : str = residue_constants.atom_types
__lowercase : List[str] = []
__lowercase : Dict = prot.atom_mask
__lowercase : Tuple = prot.aatype
__lowercase : str = prot.atom_positions
__lowercase : str = prot.residue_index.astype(np.intaa )
__lowercase : Tuple = prot.b_factors
__lowercase : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
__lowercase : int = get_pdb_headers(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
pdb_lines.extend(_lowerCamelCase )
__lowercase : str = aatype.shape[0]
__lowercase : Union[str, Any] = 1
__lowercase : Union[str, Any] = 0
__lowercase : Union[str, Any] = string.ascii_uppercase
__lowercase : Dict = None
# Add all atom sites.
for i in range(_lowerCamelCase ):
__lowercase : List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__lowercase : Optional[Any] = "ATOM"
__lowercase : List[Any] = atom_name if len(_lowerCamelCase ) == 4 else F" {atom_name}"
__lowercase : Tuple = ""
__lowercase : Tuple = ""
__lowercase : Optional[Any] = 1.00
__lowercase : Optional[Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
__lowercase : List[str] = ""
__lowercase : Any = "A"
if chain_index is not None:
__lowercase : int = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__lowercase : Optional[int] = (
F"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
F"{res_name_a:>3} {chain_tag:>1}"
F"{residue_index[i]:>4}{insertion_code:>1} "
F"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
F"{occupancy:>6.2f}{b_factor:>6.2f} "
F"{element:>2}{charge:>2}"
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
__lowercase : Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__lowercase : Optional[Any] = True
__lowercase : Any = chain_index[i + 1]
if should_terminate:
# Close the chain.
__lowercase : Tuple = "TER"
__lowercase : List[Any] = (
F"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowerCamelCase , _lowerCamelCase ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(_lowerCamelCase )
def snake_case_ ( lowerCAmelCase_ : Protein ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case_ ( lowerCAmelCase_ : FeatureDict , lowerCAmelCase_ : ModelOutput , lowerCAmelCase_ : Optional[np.ndarray] = None , lowerCAmelCase_ : Optional[np.ndarray] = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[Sequence[str]] = None , lowerCAmelCase_ : Optional[Sequence[int]] = None , ):
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=_lowerCamelCase , remark=_lowerCamelCase , parents=_lowerCamelCase , parents_chain_index=_lowerCamelCase , )
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : Dict , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Dict=True , __a : Dict=True , __a : Any=True , __a : List[str]=True , __a : int=99 , __a : Optional[int]=32 , __a : str=2 , __a : int=4 , __a : List[str]=37 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=512 , __a : int=16 , __a : Union[str, Any]=2 , __a : Union[str, Any]=0.02 , __a : List[str]=3 , __a : Dict=4 , __a : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = parent
__lowercase : Tuple = 13
__lowercase : Dict = 7
__lowercase : List[Any] = True
__lowercase : Tuple = True
__lowercase : List[str] = True
__lowercase : Any = True
__lowercase : Optional[int] = 99
__lowercase : str = 384
__lowercase : Optional[Any] = 2
__lowercase : Dict = 4
__lowercase : str = 37
__lowercase : Optional[int] = """gelu"""
__lowercase : int = 0.1
__lowercase : Union[str, Any] = 0.1
__lowercase : Tuple = 512
__lowercase : Tuple = 16
__lowercase : Optional[int] = 2
__lowercase : Optional[Any] = 0.02
__lowercase : Dict = 3
__lowercase : Union[str, Any] = 4
__lowercase : Tuple = 128
__lowercase : Optional[Any] = 2
__lowercase : int = 9
__lowercase : List[Any] = 1
__lowercase : Union[str, Any] = None
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[Any] = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
if self.use_token_type_ids:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Optional[Any] = None
__lowercase : str = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict , __a : List[Any] , __a : List[str] , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TFConvBertModel(config=__a )
__lowercase : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__lowercase : Any = [input_ids, input_mask]
__lowercase : Dict = model(__a )
__lowercase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Dict , __a : str ) -> Dict:
"""simple docstring"""
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=__a )
__lowercase : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : int , __a : Any , __a : Optional[int] , __a : int , __a : int , __a : List[Any] , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.num_labels
__lowercase : List[Any] = TFConvBertForSequenceClassification(config=__a )
__lowercase : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[str] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] , __a : Any , __a : Optional[Any] , __a : int , __a : Optional[int] , __a : Tuple , __a : int , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = self.num_choices
__lowercase : Dict = TFConvBertForMultipleChoice(config=__a )
__lowercase : List[str] = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : int = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__lowercase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : List[str] , __a : str , __a : List[str] , __a : List[str] , __a : List[str] , __a : Any , __a : Tuple , __a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Tuple = TFConvBertForTokenClassification(config=__a )
__lowercase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : str = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __a : Optional[int] , __a : List[str] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Any , __a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = TFConvBertForQuestionAnswering(config=__a )
__lowercase : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Union[str, Any] = False
_A : List[str] = False
_A : Dict = False
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : int = TFConvBertModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__a )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__a )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Union[str, Any] = True
__lowercase : List[Any] = True
if hasattr(__a , """use_cache""" ):
__lowercase : Optional[Any] = True
__lowercase : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , """key_length""" , __a )
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Tuple = model_class(__a )
__lowercase : Tuple = len(model(__a ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a , saved_model=__a )
__lowercase : List[Any] = os.path.join(__a , """saved_model""" , """1""" )
__lowercase : str = tf.keras.models.load_model(__a )
__lowercase : Optional[int] = model(__a )
if self.is_encoder_decoder:
__lowercase : Union[str, Any] = outputs["""encoder_hidden_states"""]
__lowercase : Union[str, Any] = outputs["""encoder_attentions"""]
else:
__lowercase : Union[str, Any] = outputs["""hidden_states"""]
__lowercase : List[str] = outputs["""attentions"""]
self.assertEqual(len(__a ) , __a )
__lowercase : List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[str] = True
__lowercase : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__lowercase : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , """key_length""" , __a )
__lowercase : List[Any] = getattr(self.model_tester , """key_length""" , __a )
def check_decoder_attentions_output(__a : List[str] ):
__lowercase : Union[str, Any] = len(__a )
self.assertEqual(out_len % 2 , 0 )
__lowercase : Any = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__a : str ):
__lowercase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : int = True
__lowercase : Any = False
__lowercase : List[Any] = model_class(__a )
__lowercase : Tuple = model(self._prepare_for_class(__a , __a ) )
__lowercase : Dict = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
__lowercase : Any = model_class(__a )
__lowercase : List[str] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Dict = True
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = model_class(__a )
__lowercase : Optional[int] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : Tuple = model(__a )[0]
__lowercase : Any = [1, 6, 768]
self.assertEqual(output.shape , __a )
__lowercase : Optional[Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1E-4 )
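# Illustrative sketch, not part of the original tests: extracting features from the
# same checkpoint with tokenized text instead of hand-written input ids.
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   inputs = tokenizer("ConvBERT mixes convolution and self-attention.", return_tensors="tf")
#   hidden_states = model(**inputs)[0]   # shape (1, sequence_length, 768)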
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_A : Union[str, Any] = (CMStochasticIterativeScheduler,)
_A : Union[str, Any] = 10
def lowerCAmelCase ( self : Dict , **__a : int ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**_a )
return config
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = 10
__lowercase : Optional[Any] = self.get_scheduler_config()
__lowercase : Union[str, Any] = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
__lowercase : List[str] = scheduler.timesteps[0]
__lowercase : Union[str, Any] = scheduler.timesteps[1]
__lowercase : Optional[Any] = self.dummy_sample
__lowercase : int = 0.1 * sample
__lowercase : Optional[Any] = scheduler.step(_a , _a , _a ).prev_sample
__lowercase : str = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : int = self.scheduler_classes[0]
__lowercase : Any = self.get_scheduler_config()
__lowercase : List[Any] = scheduler_class(**_a )
__lowercase : Optional[int] = 1
scheduler.set_timesteps(_a )
__lowercase : List[str] = scheduler.timesteps
__lowercase : Tuple = torch.manual_seed(0 )
__lowercase : Dict = self.dummy_model()
__lowercase : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
__lowercase : Union[str, Any] = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
__lowercase : List[Any] = model(_a , _a )
# 3. predict previous sample x_t-1
__lowercase : Any = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__lowercase : str = pred_prev_sample
__lowercase : int = torch.sum(torch.abs(_a ) )
__lowercase : Optional[int] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.scheduler_classes[0]
__lowercase : Tuple = self.get_scheduler_config()
__lowercase : List[Any] = scheduler_class(**_a )
__lowercase : List[str] = [106, 0]
scheduler.set_timesteps(timesteps=_a )
__lowercase : List[Any] = scheduler.timesteps
__lowercase : Any = torch.manual_seed(0 )
__lowercase : Optional[int] = self.dummy_model()
__lowercase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__lowercase : Any = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
__lowercase : Tuple = model(_a , _a )
# 3. predict previous sample x_t-1
__lowercase : Optional[Any] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__lowercase : Any = pred_prev_sample
__lowercase : List[str] = torch.sum(torch.abs(_a ) )
__lowercase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.scheduler_classes[0]
__lowercase : Union[str, Any] = self.get_scheduler_config()
__lowercase : int = scheduler_class(**_a )
__lowercase : Tuple = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : Dict = self.scheduler_classes[0]
__lowercase : str = self.get_scheduler_config()
__lowercase : List[str] = scheduler_class(**_a )
__lowercase : Union[str, Any] = [39, 30, 12, 1, 0]
__lowercase : int = len(_a )
with self.assertRaises(_a , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase : Tuple = self.scheduler_classes[0]
__lowercase : Optional[Any] = self.get_scheduler_config()
__lowercase : Tuple = scheduler_class(**_a )
__lowercase : Any = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _a , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=_a )
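# The tests above walk the standard diffusers sampling loop: scale the model input,
# predict the noise residual, then step the scheduler. A minimal sketch of that loop
# outside the test harness, assuming the public CMStochasticIterativeScheduler API;
# `model` below is a stand-in callable, not a trained network.
import torch
from diffusers import CMStochasticIterativeScheduler

def model(sample, t):
    # stand-in for a trained denoiser: returns a tensor shaped like `sample`
    return torch.zeros_like(sample)

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)      # 1. scale model input
    residual = model(scaled, t)                          # 2. predict noise residual
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 3. x_t -> x_t-1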
| 705
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : int , *__a : Dict , **__a : Optional[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
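# The class above is the generic deprecation pattern: subclass the replacement class
# and emit a warning on construction. A standalone sketch of the same pattern with
# hypothetical class names:
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept for backwards compatibility (illustrative)."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)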
| 649
| 0
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = TypeVar('''DatasetType''', Dataset, IterableDataset)
def snake_case_ ( lowerCAmelCase_ : List[DatasetType] , lowerCAmelCase_ : Optional[List[float]] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[DatasetInfo] = None , lowerCAmelCase_ : Optional[NamedSplit] = None , lowerCAmelCase_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
"""is an empty dataset dictionary.""" )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(snake_case__ )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(snake_case__ ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}." )
if i == 0:
__lowercase : int = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
else:
return _interleave_iterable_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
def snake_case_ ( lowerCAmelCase_ : List[DatasetType] , lowerCAmelCase_ : Optional[DatasetInfo] = None , lowerCAmelCase_ : Optional[NamedSplit] = None , lowerCAmelCase_ : int = 0 , ):
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
"""is an empty dataset dictionary.""" )
            raise ValueError(
                F"Dataset at position {i} has at least one split: {list(snake_case__ )}\n"
                F"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(snake_case__ ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}." )
if i == 0:
__lowercase : Dict = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
        raise ValueError(
            F"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
else:
return _concatenate_iterable_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
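# A usage sketch for the two entry points above, assuming two small in-memory
# datasets; the column name "a" is illustrative.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12]})

# sample rows with the given probabilities until one dataset is exhausted
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="first_exhausted")
# plain row-wise concatenation (axis=0)
joined = concatenate_datasets([d1, d2])
print(len(mixed), len(joined))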
| 706
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__lowercase : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__lowercase : Dict = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__lowercase : List[str] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__lowercase : Tuple = tempfile.mkdtemp()
__lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : str = os.path.join(self.tmpdirname , __a )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__a ) + """\n""" )
# load decoder from hub
__lowercase : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCAmelCase ( self : Optional[Any] , **__a : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : str , **__a : int ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase ( self : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : Any = self.get_feature_extractor()
__lowercase : str = self.get_decoder()
__lowercase : Tuple = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
processor.save_pretrained(self.tmpdirname )
__lowercase : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__a , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : int = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[int] = floats_list((3, 1000) )
__lowercase : List[Any] = feature_extractor(__a , return_tensors="""np""" )
__lowercase : List[str] = processor(__a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = self.get_feature_extractor()
__lowercase : int = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = """This is a test string"""
__lowercase : Any = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : str , __a : Tuple=(2, 10, 16) , __a : int=77 ) -> Optional[Any]:
"""simple docstring"""
np.random.seed(__a )
return np.random.rand(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : str = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase : Optional[Any] = processor.decode(__a )
__lowercase : Any = decoder.decode_beams(__a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCAmelCase ( self : List[str] , __a : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Optional[int] = self.get_decoder()
__lowercase : Any = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowercase : Union[str, Any] = processor.batch_decode(__a )
else:
with get_context(__a ).Pool() as pool:
__lowercase : Optional[Any] = processor.batch_decode(__a , __a )
__lowercase : Union[str, Any] = list(__a )
with get_context("""fork""" ).Pool() as p:
__lowercase : Optional[Any] = decoder.decode_beams_batch(__a , __a )
__lowercase , __lowercase , __lowercase : Any = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__a , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__a , decoded_processor.logit_score )
self.assertListEqual(__a , decoded_processor.lm_score )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.get_feature_extractor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = self.get_decoder()
__lowercase : int = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : Dict = self._get_dummy_logits()
__lowercase : Tuple = 15
__lowercase : Tuple = -20.0
__lowercase : Dict = -4.0
__lowercase : Dict = processor.batch_decode(
__a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Tuple = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Any = decoder.decode_beams_batch(
__a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , )
__lowercase : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][2] for d in decoded_decoder_out]
__lowercase : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __a )
self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __a , atol=1E-3 ) )
self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.get_feature_extractor()
__lowercase : List[Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_decoder()
__lowercase : Dict = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
__lowercase : List[Any] = self._get_dummy_logits()
__lowercase : Optional[int] = 2.0
__lowercase : Tuple = 5.0
__lowercase : Optional[Any] = -20.0
__lowercase : Tuple = True
__lowercase : Union[str, Any] = processor.batch_decode(
__a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
__lowercase : Any = decoded_processor_out.text
__lowercase : List[Any] = list(__a )
decoder.reset_params(
alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , )
with get_context("""fork""" ).Pool() as pool:
__lowercase : Tuple = decoder.decode_beams_batch(
__a , __a , )
__lowercase : int = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__a , __a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __a )
__lowercase : str = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __a )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : int = os.listdir(__a )
__lowercase : Optional[Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__lowercase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(__a )
__lowercase : Dict = processor.decoder.model_container[processor.decoder._model_key]
__lowercase : List[Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__lowercase : Dict = os.listdir(__a )
__lowercase : List[Any] = os.listdir(__a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = floats_list((3, 1000) )
__lowercase : List[str] = processor_wavaveca(__a , return_tensors="""np""" )
__lowercase : List[Any] = processor_auto(__a , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase : List[str] = self._get_dummy_logits()
__lowercase : List[str] = processor_wavaveca.batch_decode(__a )
__lowercase : Optional[int] = processor_auto.batch_decode(__a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.get_feature_extractor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = self.get_decoder()
__lowercase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCAmelCase ( __a : Union[str, Any] , __a : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Optional[Any] = self._get_dummy_logits()[0]
__lowercase : Dict = processor.decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__lowercase : Any = self._get_dummy_logits()
__lowercase : Dict = processor.batch_decode(__a , output_word_offsets=__a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__a , __a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__a , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
import torch
__lowercase : Any = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__a )
__lowercase : str = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=16000 ) )
__lowercase : Tuple = iter(__a )
__lowercase : Union[str, Any] = next(__a )
__lowercase : int = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__lowercase : int = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase : Union[str, Any] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
__lowercase : List[Any] = model(__a ).logits.cpu().numpy()
__lowercase : Tuple = processor.decode(logits[0] , output_word_offsets=__a )
__lowercase : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase : Optional[Any] = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase : str = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , __a )
self.assertEqual(""" """.join(self.get_from_offsets(__a , """word""" ) ) , output.text )
# output times
__lowercase : Tuple = torch.tensor(self.get_from_offsets(__a , """start_time""" ) )
__lowercase : Dict = torch.tensor(self.get_from_offsets(__a , """end_time""" ) )
# fmt: off
__lowercase : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
self.assertTrue(torch.allclose(__a , __a , atol=0.01 ) )
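# Outside the test harness the decode path above reduces to a few calls; a sketch
# assuming the public transformers names (Wav2Vec2ProcessorWithLM, Wav2Vec2ForCTC)
# and a 16 kHz waveform loaded into `speech` (here a placeholder array of silence).
import numpy as np
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = np.zeros(16000, dtype=np.float32)  # placeholder: one second of silence
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits.cpu().numpy()
out = processor.decode(logits[0], output_word_offsets=True)  # beam search + n-gram LM
print(out.text)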
| 649
| 0
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
lowerCamelCase : str = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
lowerCamelCase : int = {
'''ctrl''': 2_56,
}
lowerCamelCase : Optional[int] = {
'''Pregnancy''': 16_86_29,
'''Christianity''': 76_75,
'''Explain''': 10_64_23,
'''Fitness''': 6_34_40,
'''Saving''': 6_31_63,
'''Ask''': 2_71_71,
'''Ass''': 9_59_85,
'''Joke''': 16_35_09,
'''Questions''': 4_56_22,
'''Thoughts''': 4_96_05,
'''Retail''': 5_23_42,
'''Feminism''': 16_43_38,
'''Writing''': 1_19_92,
'''Atheism''': 19_22_63,
'''Netflix''': 4_86_16,
'''Computing''': 3_96_39,
'''Opinion''': 4_32_13,
'''Alone''': 4_49_67,
'''Funny''': 5_89_17,
'''Gaming''': 4_03_58,
'''Human''': 40_88,
'''India''': 13_31,
'''Joker''': 7_71_38,
'''Diet''': 3_62_06,
'''Legal''': 1_18_59,
'''Norman''': 49_39,
'''Tip''': 7_26_89,
'''Weight''': 5_23_43,
'''Movies''': 4_62_73,
'''Running''': 2_34_25,
'''Science''': 20_90,
'''Horror''': 3_77_93,
'''Confession''': 6_05_72,
'''Finance''': 1_22_50,
'''Politics''': 1_63_60,
'''Scary''': 19_19_85,
'''Support''': 1_26_54,
'''Technologies''': 3_25_16,
'''Teenage''': 6_61_60,
'''Event''': 3_27_69,
'''Learned''': 6_74_60,
'''Notion''': 18_27_70,
'''Wikipedia''': 3_75_83,
'''Books''': 66_65,
'''Extract''': 7_60_50,
'''Confessions''': 10_27_01,
'''Conspiracy''': 7_59_32,
'''Links''': 6_36_74,
'''Narcissus''': 15_04_25,
'''Relationship''': 5_47_66,
'''Relationships''': 13_47_96,
'''Reviews''': 4_16_71,
'''News''': 42_56,
'''Translation''': 2_68_20,
'''multilingual''': 12_84_06,
}
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
__lowercase : Optional[Any] = set()
__lowercase : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase : int = char
    __lowercase : Optional[Any] = set(pairs )
return pairs
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = VOCAB_FILES_NAMES
_A : str = PRETRAINED_VOCAB_FILES_MAP
_A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[int] = CONTROL_CODES
def __init__( self : Optional[int] , __a : Optional[Any] , __a : Tuple , __a : str="<unk>" , **__a : Optional[int] ) -> str:
"""simple docstring"""
super().__init__(unk_token=__lowerCAmelCase , **__lowerCAmelCase )
with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
__lowercase : Dict = json.load(__lowerCAmelCase )
__lowercase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(__lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
__lowercase : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
__lowercase : List[str] = [tuple(merge.split() ) for merge in merges]
__lowercase : Any = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
__lowercase : Optional[int] = {}
@property
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return len(self.encoder )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Dict , __a : Dict ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__lowercase : Optional[int] = tuple(__lowerCAmelCase )
__lowercase : List[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
__lowercase : List[str] = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
            __lowercase : List[Any] = min(__lowerCAmelCase , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase : Optional[int] = bigram
__lowercase : int = []
__lowercase : Tuple = 0
while i < len(__lowerCAmelCase ):
try:
__lowercase : List[Any] = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase : Optional[int] = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase : Union[str, Any] = tuple(__lowerCAmelCase )
__lowercase : List[str] = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
__lowercase : Optional[Any] = get_pairs(__lowerCAmelCase )
__lowercase : List[Any] = """@@ """.join(__lowerCAmelCase )
__lowercase : str = word[:-4]
__lowercase : str = word
return word
def lowerCAmelCase ( self : int , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Dict = []
__lowercase : Union[str, Any] = re.findall(r"""\S+\n?""" , __lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def lowerCAmelCase ( self : List[str] , __a : Optional[int] ) -> Any:
"""simple docstring"""
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Any , __a : str ) -> Dict:
"""simple docstring"""
return self.decoder.get(__lowerCAmelCase , self.unk_token )
def lowerCAmelCase ( self : Dict , __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Tuple = """ """.join(__lowerCAmelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCAmelCase ( self : List[Any] , __a : Tuple , __a : Union[str, Any] = None ) -> Dict:
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase : str = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : Union[str, Any] = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + """\n""" )
__lowercase : Tuple = 0
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__lowercase : Any = token_index
writer.write(""" """.join(__lowerCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
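# The bpe method above is the textbook byte-pair-encoding merge loop: repeatedly
# fuse the adjacent symbol pair with the best (lowest) merge rank. A self-contained
# sketch of the same loop over a toy merge table (the ranks below are made up):
def bpe_merge(word, bpe_ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if best not in bpe_ranks:
            break  # no learned merge applies to any remaining pair
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

# toy merge table: lower rank = merged earlier during training
print(bpe_merge("lower", {("l", "o"): 0, ("lo", "w"): 1}))  # ['low', 'e', 'r</w>']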
| 707
|
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
return int((input_a, input_a).count(0 ) == 0 )
def snake_case_ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
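# and_gate returns 1 only when neither input is 0; the full truth table makes that
# explicit. A quick sketch using the same zero-counting trick:
from itertools import product

for a, b in product((0, 1), repeat=2):
    print(a, b, "->", int((a, b).count(0) == 0))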
| 649
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
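# _LazyModule defers the heavy torch imports until an attribute is first accessed.
# Plain Python gets the same effect with a module-level __getattr__ (PEP 562);
# a minimal sketch, independent of the transformers helper (names illustrative):
import importlib

_LAZY = {"BloomModel": ".modeling_bloom", "BloomConfig": ".configuration_bloom"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")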
| 708
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase : int = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def snake_case_ ( ):
__lowercase : List[Any] = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase : Union[str, Any] = get_sagemaker_input()
else:
__lowercase : str = get_cluster_input()
return config
def snake_case_ ( lowerCAmelCase_ : List[str]=None ):
if subparsers is not None:
__lowercase : Optional[int] = subparsers.add_parser("""config""" , description=lowerCAmelCase_ )
else:
__lowercase : List[str] = argparse.ArgumentParser("""Accelerate config command""" , description=lowerCAmelCase_ )
parser.add_argument(
"""--config_file""" , default=lowerCAmelCase_ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def snake_case_ ( lowerCAmelCase_ : Tuple ):
__lowercase : Union[str, Any] = get_user_input()
if args.config_file is not None:
__lowercase : List[Any] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
__lowercase : Any = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(lowerCAmelCase_ )
else:
config.to_yaml_file(lowerCAmelCase_ )
print(F"accelerate configuration saved at {config_file}" )
def snake_case_ ( ):
__lowercase : str = config_command_parser()
__lowercase : str = parser.parse_args()
config_command(lowerCAmelCase_ )
if __name__ == "__main__":
main()
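# The config command above uses the standard argparse sub-command wiring: each
# sub-command registers itself on a shared subparsers object and stores its handler
# with set_defaults(func=...). A minimal standalone sketch of that dispatch pattern:
import argparse

def run_config(args):
    print(f"would write config to {args.config_file}")

parser = argparse.ArgumentParser(prog="accelerate")
subparsers = parser.add_subparsers()
config_parser = subparsers.add_parser("config")
config_parser.add_argument("--config_file", default="default_config.yaml")
config_parser.set_defaults(func=run_config)

args = parser.parse_args(["config"])
args.func(args)  # dispatch to the selected sub-command's handler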
| 649
| 0
|
import argparse
from collections import defaultdict
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] ):
__lowercase : List[str] = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(_lowercase , """r""" ) as f:
__lowercase : str = f.readlines()
__lowercase : Any = F"class {class_name}("
__lowercase : Any = F"{4 * ' '}def {test_name}("
__lowercase : Optional[Any] = F"{8 * ' '}{correct_line.split()[0]}"
__lowercase : Any = F"{16 * ' '}{correct_line.split()[0]}"
__lowercase : Any = False
__lowercase : str = False
__lowercase : Union[str, Any] = False
__lowercase : Optional[int] = False
__lowercase : List[str] = 0
__lowercase : Optional[int] = 0
__lowercase : List[Any] = []
for line in lines:
if line.startswith(_lowercase ):
__lowercase : Tuple = True
elif in_class and line.startswith(_lowercase ):
__lowercase : Optional[Any] = True
elif in_class and in_func and (line.startswith(_lowercase ) or line.startswith(_lowercase )):
__lowercase : List[Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
__lowercase : Optional[Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
__lowercase : str = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
__lowercase : Optional[Any] = False
else:
new_lines.append(_lowercase )
with open(_lowercase , """w""" ) as f:
for line in new_lines:
f.write(_lowercase )
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=None ):
if fail is not None:
with open(_lowercase , """r""" ) as f:
__lowercase : Optional[int] = {l.strip() for l in f.readlines()}
else:
__lowercase : Optional[Any] = None
with open(_lowercase , """r""" ) as f:
__lowercase : Any = f.readlines()
__lowercase : List[Any] = defaultdict(_lowercase )
for line in correct_lines:
__lowercase : Any = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
lowerCamelCase : Tuple = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 709
|
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : list[str] | None = None ):
__lowercase : Tuple = word_bank or []
# create a table
__lowercase : int = len(lowerCAmelCase_ ) + 1
__lowercase : list[list[list[str]]] = []
for _ in range(lowerCAmelCase_ ):
table.append([] )
# seed value
__lowercase : Dict = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCAmelCase_ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCAmelCase_ )] == word:
__lowercase : list[list[str]] = [
[word, *way] for way in table[i]
]
                    # add the word to every combination the current position holds,
                    # then push that combination to table[i + len(word)]
table[i + len(lowerCAmelCase_ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCAmelCase_ )]:
combination.reverse()
return table[len(lowerCAmelCase_ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
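# The table above builds answers bottom-up; the same recurrence also has a
# top-down memoized form. A sketch (tuples keep the cached results hashable):
from functools import lru_cache

def all_construct_memo(target, word_bank):
    @lru_cache(maxsize=None)
    def helper(rest):
        if rest == "":
            return ((),)  # one way to build "": use no words
        ways = []
        for word in word_bank:
            if rest.startswith(word):
                for tail in helper(rest[len(word):]):
                    ways.append((word,) + tail)
        return tuple(ways)

    return [list(way) for way in helper(target)]

print(all_construct_memo("jwajalapa", ("jwa", "j", "w", "a", "la", "lapa")))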
| 649
| 0