| code (string, lengths 87–55.2k) | code_codestyle (int64, 0–349) | style_context (string, lengths 135–49.1k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :str = logging.get_logger(__name__)
lowercase__ :Tuple = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Tuple ='''dpr'''
def __init__( self ,A__=3_0_5_2_2 ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=2 ,A__=0.02 ,A__=1E-12 ,A__=0 ,A__="absolute" ,A__ = 0 ,**A__ ,):
super().__init__(pad_token_id=A__ ,**A__)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = projection_dim
lowercase = position_embedding_type
| 101 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """segformer"""
def __init__( self , __magic_name__=3 , __magic_name__=4 , __magic_name__=[2, 2, 2, 2] , __magic_name__=[8, 4, 2, 1] , __magic_name__=[3_2, 6_4, 1_6_0, 2_5_6] , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[1, 2, 5, 8] , __magic_name__=[4, 4, 4, 4] , __magic_name__="gelu" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=0.1 , __magic_name__=1e-6 , __magic_name__=2_5_6 , __magic_name__=2_5_5 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __magic_name__ , )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = depths
lowerCamelCase : List[Any] = sr_ratios
lowerCamelCase : int = hidden_sizes
lowerCamelCase : Union[str, Any] = patch_sizes
lowerCamelCase : Optional[Any] = strides
lowerCamelCase : Dict = mlp_ratios
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Any = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Dict = classifier_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = decoder_hidden_size
lowerCamelCase : str = kwargs.get("""reshape_last_stage""" , __magic_name__ )
lowerCamelCase : Dict = semantic_loss_ignore_index
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-4
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 0 |
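Both code samples in the row above define Hugging Face `PretrainedConfig` subclasses and follow the same pattern: store hyperparameters as attributes in `__init__` and forward shared arguments (pad token id, extra kwargs) to the base class. A minimal sketch of that pattern — the `ToyConfig` class and its fields are ours, not from the dataset:

```python
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"  # identifies the architecture in saved config.json files

    def __init__(self, hidden_size=64, num_layers=2, pad_token_id=0, **kwargs):
        # The base class handles shared fields such as pad_token_id.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers

config = ToyConfig(hidden_size=128)
print(config.hidden_size)   # 128
print(config.pad_token_id)  # 0
```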
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='nllb-moe'
lowerCamelCase__ =['past_key_values']
lowerCamelCase__ ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , a_=12_81_12 , a_=10_24 , a_=12 , a_=40_96 , a_=16 , a_=12 , a_=40_96 , a_=16 , a_=0.05 , a_=0.05 , a_=True , a_=True , a_="relu" , a_=10_24 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.02 , a_=2 , a_=True , a_=False , a_="float32" , a_=False , a_=1_28 , a_=64 , a_=4 , a_=4 , a_=0.001 , a_=0.001 , a_="all" , a_=False , a_=False , a_=1.0 , a_=0.2 , a_=1 , a_=0 , a_=2 , a_=False , **a_ , ):
'''simple docstring'''
__snake_case : Optional[Any] = vocab_size
__snake_case : int = max_position_embeddings
__snake_case : int = d_model
__snake_case : List[str] = encoder_ffn_dim
__snake_case : Optional[Any] = encoder_layers
__snake_case : Dict = encoder_attention_heads
__snake_case : str = decoder_ffn_dim
__snake_case : Dict = decoder_layers
__snake_case : Union[str, Any] = decoder_attention_heads
__snake_case : Optional[Any] = dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : Union[str, Any] = activation_dropout
__snake_case : List[Any] = activation_function
__snake_case : int = init_std
__snake_case : List[Any] = encoder_layerdrop
__snake_case : Optional[Any] = decoder_layerdrop
__snake_case : Tuple = use_cache
__snake_case : Dict = encoder_layers
__snake_case : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case : List[str] = router_z_loss_coef
__snake_case : int = router_aux_loss_coef
__snake_case : Union[str, Any] = decoder_sparse_step
__snake_case : Tuple = encoder_sparse_step
__snake_case : int = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : int = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
__snake_case : Optional[int] = router_dtype
__snake_case : Tuple = router_ignore_padding_tokens
__snake_case : List[Any] = batch_prioritized_routing
__snake_case : Optional[int] = second_expert_policy
__snake_case : Any = normalize_router_prob_before_dropping
__snake_case : List[Any] = moe_eval_capacity_token_fraction
__snake_case : Optional[int] = moe_token_dropout
__snake_case : int = output_router_logits
super().__init__(
pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , decoder_start_token_id=a_ , **a_ , )
| 102 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : Any = input.size()
lowerCamelCase : List[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = shape[dimension]
lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase : str = [slice(lowerCamelCase )] * rank
lowerCamelCase : List[str] = indices
lowerCamelCase : Dict = input[s]
lowerCamelCase : Any = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = remainders == 0
lowerCamelCase : List[Any] = candidates[divisor_indices]
lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
| 287 | 0 |
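The sliding-window helper in the GPT-Neo sample above (the first `_a` function) has had distinct variables collapsed into a single placeholder, so it cannot run as printed. A reconstructed sketch, with parameter names assumed from context rather than taken from the dataset:

```python
import torch

def unfold_windows(tensor, dimension, size, step):
    """Slide a window of length `size` along `dimension` with stride `step`,
    stacking the windows as a new trailing dimension."""
    shape = tensor.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    # Each row holds the indices of one window.
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = tensor[s]
    # Move the window dimension to the end.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)

x = torch.arange(10)
print(unfold_windows(x, 0, 4, 2))
# tensor([[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]])
```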
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=UpperCamelCase_ ):
_a = ['''note_seq''']
def __init__( self : List[str] , *A_ : Optional[int] , **A_ : Union[str, Any]):
requires_backends(self , ['''note_seq'''])
@classmethod
def UpperCAmelCase__ ( cls : str , *A_ : List[str] , **A_ : int):
requires_backends(cls , ['''note_seq'''])
@classmethod
def UpperCAmelCase__ ( cls : str , *A_ : Dict , **A_ : Optional[int]):
requires_backends(cls , ['''note_seq'''])
| 103 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] ,model_result['''ss'''] ):
__lowercase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = '''sgugger/tiny-distilbert-classification'''
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,only_pretrain_model=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,torchscript=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' ,'''Can\'t do half precision''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,fpaa=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = AutoConfig.from_pretrained(lowercase__ )
# set architectures equal to `None`
__lowercase = None
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ ,configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' ,'''Can\'t do half precision''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=lowercase__ ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ ,configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = '''sshleifer/tinier_bart'''
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ ,configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = '''sshleifer/tiny-gpt2'''
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ ,configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = '''sshleifer/tinier_bart'''
__lowercase = AutoConfig.from_pretrained(lowercase__ )
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ ,configs=[config] )
__lowercase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,save_to_csv=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(lowercase__ ,'''inf_time.csv''' ) ,train_memory_csv_file=os.path.join(lowercase__ ,'''train_mem.csv''' ) ,inference_memory_csv_file=os.path.join(lowercase__ ,'''inf_mem.csv''' ) ,train_time_csv_file=os.path.join(lowercase__ ,'''train_time.csv''' ) ,env_info_csv_file=os.path.join(lowercase__ ,'''env.csv''' ) ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase__ ,'''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ ,'''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ ,'''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ ,'''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase__ ,'''env.csv''' ) ).exists() )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(lowercase__ : List[Any] ):
self.assertTrue(hasattr(lowercase__ ,'''sequential''' ) )
self.assertTrue(hasattr(lowercase__ ,'''cumulative''' ) )
self.assertTrue(hasattr(lowercase__ ,'''current''' ) )
self.assertTrue(hasattr(lowercase__ ,'''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=lowercase__ ,inference=lowercase__ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(lowercase__ ,'''log.txt''' ) ,log_print=lowercase__ ,trace_memory_line_by_line=lowercase__ ,multi_process=lowercase__ ,)
__lowercase = PyTorchBenchmark(lowercase__ )
__lowercase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase__ ,'''log.txt''' ) ).exists() )
| 104 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 287 | 0 |
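The test script above exercises `Accelerator.accumulate`, under which gradients are synchronized and the optimizer effectively steps only every `gradient_accumulation_steps` batches (and at the end of the dataloader). A minimal sketch of that usage pattern — the toy model and data are ours:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 1), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside `accumulate`, gradient sync and stepping happen only on
    # every `gradient_accumulation_steps`-th batch; otherwise grads
    # accumulate locally, which is what the tests above assert.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```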
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Dict:
'''simple docstring'''
a : List[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->str:
'''simple docstring'''
a, a : Union[str, Any] = emb.weight.shape
a : Tuple = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
a : List[Any] = emb.weight.data
return lin_layer
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict ) ->Optional[Any]:
'''simple docstring'''
a : str = torch.load(_lowercase , map_location="cpu" )
a : List[str] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
a : Optional[int] = mam_aaa["model"]
remove_ignore_keys_(_lowercase )
a : Optional[int] = state_dict["encoder.embed_tokens.weight"].shape[0]
a : Optional[Any] = MaMaaaConfig(
vocab_size=_lowercase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
a : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
a : Optional[Any] = MaMaaaForConditionalGeneration(_lowercase )
model.model.load_state_dict(_lowercase , strict=_lowercase )
a : List[str] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
a : Dict = parser.parse_args()
    a : Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 105 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 0 |
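The metric above delegates to `scipy.stats.pearsonr`; the coefficient in its docstring example can be reproduced directly from the definition r = Σ(x−x̄)(y−ȳ) / √(Σ(x−x̄)² · Σ(y−ȳ)²). A minimal sketch — the helper name is ours:

```python
import numpy as np

def pearson_r(x, y):
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    dx, dy = x - x.mean(), y - y.mean()
    return float((dx * dy).sum() / np.sqrt((dx * dx).sum() * (dy * dy).sum()))

# Reproduces the docstring example: r = -15 / sqrt(40.8 * 10) ≈ -0.7426
print(round(pearson_r([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5]), 2))  # -0.74
```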
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Tuple = len(A_ )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase__ : Union[str, Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase__ : List[str] = arr[mi::-1] + arr[mi + 1 : len(A_ )]
# Reverse whole list
lowerCAmelCase__ : Dict = arr[cur - 1 :: -1] + arr[cur : len(A_ )]
cur -= 1
return arr
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 106 |
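The pancake-sort sample just above is algorithmically intact but hard to read through the mangled names. A de-obfuscated sketch (names reconstructed, not from the dataset): each pass flips the largest unsorted element to the front, then flips the whole unsorted prefix so it lands in its final slot.

```python
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[0:cur]))      # position of the largest unsorted value
        arr = arr[mi::-1] + arr[mi + 1 :]    # flip it to the front
        arr = arr[cur - 1 :: -1] + arr[cur:] # flip the prefix; it lands at index cur-1
        cur -= 1
    return arr

print(pancake_sort([3, 2, 4, 1]))  # [1, 2, 3, 4]
```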
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""input_features""", """is_longer"""]
def __init__( self : Dict , __lowerCamelCase : List[Any]=64 , __lowerCamelCase : Union[str, Any]=4_80_00 , __lowerCamelCase : Optional[int]=4_80 , __lowerCamelCase : List[Any]=10 , __lowerCamelCase : str=10_24 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : int=False , __lowerCamelCase : float = 0 , __lowerCamelCase : float = 1_40_00 , __lowerCamelCase : int = None , __lowerCamelCase : str = "fusion" , __lowerCamelCase : str = "repeatpad" , **__lowerCamelCase : Any , ) -> Optional[int]:
super().__init__(
feature_size=__lowerCamelCase , sampling_rate=__lowerCamelCase , padding_value=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
a = top_db
a = truncation
a = padding
a = fft_window_size
a = (fft_window_size >> 1) + 1
a = hop_length
a = max_length_s
a = max_length_s * sampling_rate
a = sampling_rate
a = frequency_min
a = frequency_max
a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCamelCase , min_frequency=__lowerCamelCase , max_frequency=__lowerCamelCase , sampling_rate=__lowerCamelCase , norm=__lowerCamelCase , mel_scale="htk" , )
a = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCamelCase , min_frequency=__lowerCamelCase , max_frequency=__lowerCamelCase , sampling_rate=__lowerCamelCase , norm="slaney" , mel_scale="slaney" , )
def __UpperCAmelCase ( self : Any ) -> Dict[str, Any]:
a = copy.deepcopy(self.__dict__ )
a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __UpperCAmelCase ( self : str , __lowerCamelCase : np.array , __lowerCamelCase : Optional[np.array] = None ) -> np.ndarray:
a = spectrogram(
__lowerCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__lowerCamelCase , log_mel="dB" , )
return log_mel_spectrogram.T
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> List[Any]:
a = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
a = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
a = [0]
# randomly choose index for each part
a = np.random.choice(ranges[0] )
a = np.random.choice(ranges[1] )
a = np.random.choice(ranges[2] )
a = mel[idx_front : idx_front + chunk_frames, :]
a = mel[idx_middle : idx_middle + chunk_frames, :]
a = mel[idx_back : idx_back + chunk_frames, :]
a = torch.tensor(mel[None, None, :] )
a = torch.nn.functional.interpolate(
__lowerCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=__lowerCamelCase )
a = mel_shrink[0][0].numpy()
a = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __UpperCAmelCase ( self : str , __lowerCamelCase : np.array , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Any ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
a = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
a = len(__lowerCamelCase ) - max_length
a = np.random.randint(0 , overflow + 1 )
a = waveform[idx : idx + max_length]
a = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
a = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters )
a = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
a = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
a = np.stack([mel, mel, mel, mel] , axis=0 )
a = False
else:
a = self._random_mel_fusion(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
a = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
a = int(max_length / len(__lowerCamelCase ) )
a = np.stack(np.tile(__lowerCamelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
a = int(max_length / len(__lowerCamelCase ) )
a = np.stack(np.tile(__lowerCamelCase , __lowerCamelCase ) )
a = np.pad(__lowerCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
if truncation == "fusion":
a = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters )
a = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
a = self._np_extract_fbank_features(__lowerCamelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation : str = None , padding : Optional[str] = None , max_length : Optional[int] = None , sampling_rate : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
| 107 |
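The repeat-style padding used above is easy to check in isolation. A minimal sketch of the "repeatpad" branch (the helper name `repeatpad` is mine, not part of the extractor):

import numpy as np

def repeatpad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    # tile the short clip as many whole times as fits, then zero-pad the remainder
    n_repeat = int(max_length / len(waveform))
    repeated = np.tile(waveform, n_repeat)
    return np.pad(repeated, (0, max_length - repeated.shape[0]), mode="constant", constant_values=0)

print(repeatpad(np.ones(3, dtype=np.float32), 10))  # [1. 1. 1. 1. 1. 1. 1. 1. 1. 0.]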
import json
import sys
def format_json_to_md ( input_json_file, output_md_file ):
    with open(input_json_file, encoding="""utf-8""" ) as f:
        results = json.load(f )
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""", None )
            dif_val = metric_vals.get("""diff""", None )
            val_str = F''' {new_val:f}''' if isinstance(new_val, (int, float) ) else """None"""
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val, (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val, (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("""</details>""" )
    with open(output_md_file, """w""", encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 287 | 0 |
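A quick way to exercise the converter above; the nesting mirrors what the loop expects (benchmark -> metric -> {new, old, diff}), and the file names are illustrative:

import json

results = {"benchmarks/text_gen": {"latency_ms": {"new": 12.5, "old": 14.0, "diff": -1.5}}}
with open("results.json", "w", encoding="utf-8") as f:
    json.dump(results, f)
format_json_to_md("results.json", "report.md")  # writes a collapsible <details> markdown table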
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
pass
| 108 |
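For context, a decorator with the same skip-when-missing behavior can be built from `unittest.skipUnless`; this is a sketch of the idea, not the library's actual implementation:

import importlib.util
import unittest

def require_onnxruntime_sketch(test_case):
    # skip the decorated test class/function when onnxruntime cannot be imported
    has_ort = importlib.util.find_spec("onnxruntime") is not None
    return unittest.skipUnless(has_ort, "test requires onnxruntime")(test_case)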
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
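The helper above reverses only words longer than four characters, which the advertised call demonstrates:

# "Hey" (3 letters) is kept, "wollef" -> "fellow", "sroirraw" -> "warriors"
words = "Hey wollef sroirraw".split()
assert " ".join("".join(w[::-1]) if len(w) > 4 else w for w in words) == "Hey fellow warriors"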
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 109 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
            """versions of HuggingFace Transformers.""" )
| 287 | 0 |
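The version guard at the end is typically called once at the top of an example script. A standalone sketch of the comparison it performs (function name is mine):

from packaging import version

def check_min_version_sketch(current: str, minimum: str) -> None:
    # raise when the installed version is older than the required minimum
    if version.parse(current) < version.parse(minimum):
        raise ImportError(f"requires transformers >= {minimum}, found {current}")

check_min_version_sketch("4.30.0", "4.21.0")  # passes silently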
'''simple docstring'''
import json
import sys
def format_json_to_md ( input_json_file : str , output_md_file : str ) -> None:
    with open(input_json_file , encoding='''utf-8''' ) as f:
        results = json.load(f )
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('''/''' )[-1]
        output_md.append(f"""### Benchmark: {benchmark_file_name}""" )
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get('''old''' , None )
            dif_val = metric_vals.get('''diff''' , None )
            val_str = f""" {new_val:f}""" if isinstance(new_val , (int, float) ) else """None"""
            if old_val is not None:
                val_str += f""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += f""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('''</details>''' )
    with open(output_md_file , '''w''' , encoding='''utf-8''' ) as f:
        f.writelines('''\n'''.join(output_md ) )
if __name__ == "__main__":
UpperCamelCase__: List[str] = sys.argv[1]
UpperCamelCase__: Optional[int] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 287 | 0 |
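Assuming the class above mirrors `transformers.CamembertConfig`, instantiating it with overrides works like any other `PretrainedConfig`:

from transformers import CamembertConfig

cfg = CamembertConfig(hidden_size=256, num_hidden_layers=4)
print(cfg.hidden_size, cfg.vocab_size)  # 256 30522 (vocab_size keeps its default)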
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : str = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[Any] = """longformer"""
def __init__( self , _lowercase = 512 , _lowercase = 2 , _lowercase = 1 , _lowercase = 0 , _lowercase = 2 , _lowercase = 30_522 , _lowercase = 768 , _lowercase = 12 , _lowercase = 12 , _lowercase = 3_072 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 512 , _lowercase = 2 , _lowercase = 0.0_2 , _lowercase = 1e-12 , _lowercase = False , **_lowercase , ) -> Optional[int]:
super().__init__(pad_token_id=_lowercase , **_lowercase )
a_ : Dict = attention_window
a_ : Union[str, Any] = sep_token_id
a_ : Optional[int] = bos_token_id
a_ : int = eos_token_id
a_ : List[Any] = vocab_size
a_ : Dict = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : Optional[int] = num_attention_heads
a_ : List[Any] = hidden_act
a_ : Any = intermediate_size
a_ : Any = hidden_dropout_prob
a_ : Tuple = attention_probs_dropout_prob
a_ : Any = max_position_embeddings
a_ : Dict = type_vocab_size
a_ : int = initializer_range
a_ : Optional[Any] = layer_norm_eps
a_ : Union[str, Any] = onnx_export
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = "default" , _lowercase = None ) -> Dict:
super().__init__(_lowercase , _lowercase , _lowercase )
a_ : Optional[int] = True
@property
def UpperCamelCase__ ( self ) -> Tuple:
if self.task == "multiple-choice":
a_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
a_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def UpperCamelCase__ ( self ) -> Tuple:
a_ : str = super().outputs
if self.task == "default":
a_ : Any = {0: """batch"""}
return outputs
@property
def UpperCamelCase__ ( self ) -> List[str]:
return 1e-4
@property
def UpperCamelCase__ ( self ) -> List[Any]:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def UpperCamelCase__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> Optional[Any]:
a_ : List[Any] = super().generate_dummy_inputs(
preprocessor=_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
a_ : Any = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
a_ : Union[str, Any] = 1
return inputs
| 248 |
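The dummy-input trick at the end (start from zeros, then mark every second token global) is easy to reproduce on its own; the tensor shape below is illustrative:

import torch

input_ids = torch.randint(0, 100, (2, 8))
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # give every second token global attention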
_lowerCamelCase ={
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def _a ( from_type, to_type, value ):
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 0 |
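With the table above, a conversion is one multiply-divide through the joule base unit; `_a` is the (unhelpfully named) converter defined above. For example, 7.2 MJ is exactly 2 kWh:

# value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
print(_a("joule", "kilowatthour", 7_200_000))  # 2.0
print(_a("kilocalorie_nutr", "joule", 1))      # 4186800.0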
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
snake_case_ = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def _lowerCAmelCase ( lowercase_=None ):
if subparsers is not None:
UpperCAmelCase = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCAmelCase = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCAmelCase = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=lowercase_ , default=lowercase_ , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=lowercase_ , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=lowercase_ , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCAmelCase = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=lowercase_ , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=lowercase_ )
return parser
def _lowerCAmelCase ( lowercase_ ):
UpperCAmelCase = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowercase_ ):
UpperCAmelCase = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCAmelCase = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCAmelCase = defaults.commands
if not args.tpu_name:
UpperCAmelCase = defaults.tpu_name
if not args.tpu_zone:
UpperCAmelCase = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCAmelCase = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
UpperCAmelCase = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , lowercase_ ):
UpperCAmelCase = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCAmelCase = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowercase_ ):
UpperCAmelCase = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCAmelCase = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCAmelCase = """; """.join(lowercase_ )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCAmelCase = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(lowercase_ )}""" )
return
subprocess.run(lowercase_ )
print('Successfully setup pod.' )
def _lowerCAmelCase ( ):
UpperCAmelCase = tpu_command_parser()
UpperCAmelCase = parser.parse_args()
tpu_command_launcher(lowercase_ )
| 78 |
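For reference, the launcher ultimately shells out to a `gcloud ... tpu-vm ssh` invocation like the one sketched below (every value here is illustrative, not a real resource):

cmd = [
    "gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
    "--zone", "us-central1-b",
    "--command", "cd /usr/share; pip install accelerate -U; python train.py",
    "--worker", "all",
]
print(" ".join(cmd))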
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
    def UpperCamelCase__ ( self , features ):
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        task_template.__dict__["""input_schema"""] = input_schema
        return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 287 | 0 |
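Assuming the template above corresponds to `datasets`' `AutomaticSpeechRecognition` task template, aligning it against a concrete feature dict looks like this:

from datasets import Audio, Features, Value
from datasets.tasks import AutomaticSpeechRecognition

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition()
aligned = template.align_with_features(features)  # deep copy carrying the concrete Audio schema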
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original ( checkpoint_path : str , config_path : str , output_path : str ):
    """simple docstring"""
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["""model"""]
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = """first_stage_model."""
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , "" )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = """model.diffusion_model."""
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , "" )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
_A = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 231 |
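After the script runs, the saved directory is an ordinary diffusers pipeline; reloading and sampling from it is one call each (the path is illustrative):

from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("./converted-ldm")
image = pipe(num_inference_steps=50).images[0]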
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
    def UpperCamelCase__ ( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # for the vision sub-config, read the nested "vision_config" entry (not "text_config")
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
    def UpperCamelCase__ ( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
| 287 | 0 |
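Assuming the classes above mirror the `transformers` BridgeTower configs, the composite config nests a text and a vision config with the defaults shown:

from transformers import BridgeTowerConfig

cfg = BridgeTowerConfig()
print(cfg.text_config.vocab_size, cfg.vision_config.hidden_size)  # 50265 768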
'''simple docstring'''
def gray_code_sequence ( bit_count ):
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count ):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        sequence.append("""0""" + smaller_sequence[i] )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("""1""" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 198 |
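A property worth asserting about the sequence above: consecutive Gray codes differ in exactly one bit.

seq = gray_code_sequence(3)
print(seq)  # [0, 1, 3, 2, 6, 7, 5, 4]
assert all(bin(a ^ b).count("1") == 1 for a, b in zip(seq, seq[1:]))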
def solution ( limit = 100_0000 ):
    primes = set(range(3, limit, 2 ) )
    primes.add(2 )
    for p in range(3, limit, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p, limit + 1, p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 0 |
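The sum of totients counts reduced proper fractions p/q with q <= limit (Project Euler 72); the problem statement's small case makes a handy spot check:

# for denominators d <= 8 there are 21 reduced proper fractions
assert solution(8) == 21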
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _A ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase : int = """megatron-bert"""
def __init__( self : str , __UpperCAmelCase : str=29056 , __UpperCAmelCase : Union[str, Any]=1024 , __UpperCAmelCase : Union[str, Any]=24 , __UpperCAmelCase : Dict=16 , __UpperCAmelCase : Dict=4096 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Union[str, Any]=512 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Union[str, Any]="absolute" , __UpperCAmelCase : List[str]=True , **__UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a : List[Any] = vocab_size
a : List[str] = hidden_size
a : Tuple = num_hidden_layers
a : Optional[int] = num_attention_heads
a : Tuple = hidden_act
a : Union[str, Any] = intermediate_size
a : List[Any] = hidden_dropout_prob
a : Tuple = attention_probs_dropout_prob
a : int = max_position_embeddings
a : Tuple = type_vocab_size
a : str = initializer_range
a : int = layer_norm_eps
a : Optional[Any] = position_embedding_type
a : Optional[int] = use_cache
| 40 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature ( word ):
    return "".join(sorted(word ) )
def anagram ( my_word ):
    return word_by_signature[signature(my_word )]
_lowerCamelCase =Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_lowerCamelCase =sorted({word.strip().lower() for word in data.splitlines()})
_lowerCamelCase =collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_lowerCamelCase ={word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 287 | 0 |
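The signature trick above groups words by their sorted letters; a self-contained sketch with a tiny word list:

import collections

word_by_sig = collections.defaultdict(list)
for w in ["night", "thing", "act", "cat", "tac"]:
    word_by_sig["".join(sorted(w))].append(w)
print({k: v for k, v in word_by_sig.items() if len(v) > 1})
# {'ghint': ['night', 'thing'], 'act': ['act', 'cat', 'tac']}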
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
__snake_case = '''bert-base-cased'''
__snake_case = '''google/pegasus-xsum'''
__snake_case = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
__snake_case = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
__snake_case = '''patrickvonplaten/t5-tiny-random'''
__snake_case = '''sshleifer/bart-tiny-random'''
__snake_case = '''sshleifer/tiny-mbart'''
__snake_case = '''sshleifer/tiny-marian-en-de'''
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = """\n""".join(_lowerCAmelCase )
Path(_lowerCAmelCase ).open('''w''' ).writelines(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_lowerCAmelCase, f'{split}.source' ), _lowerCAmelCase )
_dump_articles(os.path.join(_lowerCAmelCase, f'{split}.target' ), _lowerCAmelCase )
return tmp_dir
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES )
_a = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES )
_a = 4
_a = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_a = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
_a = SeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='''train''' , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , )
_a = DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_a = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Any:
_a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_a = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_a = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES )
_a = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES )
_a = 4
_a = LegacySeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='''train''' , max_source_length=20 , max_target_length=__UpperCAmelCase , )
_a = DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _UpperCAmelCase ( self ) -> int:
_a = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
_a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_a = tmp_dir.joinpath('''train.source''' ).open().readlines()
_a = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__UpperCAmelCase , __UpperCAmelCase , 128 , __UpperCAmelCase )
_a = {x.name for x in tmp_dir.iterdir()}
_a = {x.name for x in save_dir.iterdir()}
_a = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__UpperCAmelCase ) < len(__UpperCAmelCase )
assert len(__UpperCAmelCase ) == 1
assert len(packed_examples[0] ) == sum(len(__UpperCAmelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def _UpperCAmelCase ( self ) -> int:
if not FAIRSEQ_AVAILABLE:
return
_a = self._get_dataset(max_len=64 )
_a = 64
_a = ds.make_dynamic_sampler(__UpperCAmelCase , required_batch_size_multiple=__UpperCAmelCase )
_a = [len(__UpperCAmelCase ) for x in batch_sampler]
assert len(set(__UpperCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__UpperCAmelCase ) == len(__UpperCAmelCase ) # no dropped or added examples
_a = DataLoader(__UpperCAmelCase , batch_sampler=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
_a = []
_a = []
for batch in data_loader:
_a = batch["""input_ids"""].shape
_a = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_a = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(__UpperCAmelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__UpperCAmelCase )
assert num_src_per_batch[0] == max(__UpperCAmelCase )
if failures:
raise AssertionError(F'too many tokens in {len(__UpperCAmelCase )} batches' )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self._get_dataset(max_len=512 )
_a = 2
_a = ds.make_sortish_sampler(__UpperCAmelCase , shuffle=__UpperCAmelCase )
_a = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
_a = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__UpperCAmelCase )
_a = tokenizer.pad_token_id
def count_pad_tokens(__UpperCAmelCase , __UpperCAmelCase="input_ids" ):
return [batch[k].eq(__UpperCAmelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__UpperCAmelCase , k='''labels''' ) ) < sum(count_pad_tokens(__UpperCAmelCase , k='''labels''' ) )
assert sum(count_pad_tokens(__UpperCAmelCase ) ) < sum(count_pad_tokens(__UpperCAmelCase ) )
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase=1000 , __UpperCAmelCase=128 ) -> str:
if os.getenv('''USE_REAL_DATA''' , __UpperCAmelCase ):
_a = """examples/seq2seq/wmt_en_ro"""
_a = max_len * 2 * 64
if not Path(__UpperCAmelCase ).joinpath('''train.len''' ).exists():
save_len_file(__UpperCAmelCase , __UpperCAmelCase )
else:
_a = """examples/seq2seq/test_data/wmt_en_ro"""
_a = max_len * 4
save_len_file(__UpperCAmelCase , __UpperCAmelCase )
_a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_a = SeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='''train''' , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , n_obs=__UpperCAmelCase , )
return ds, max_tokens, tokenizer
def _UpperCAmelCase ( self ) -> List[Any]:
_a = self._get_dataset()
_a = set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__UpperCAmelCase ) )
_a = set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__UpperCAmelCase ) )
assert idsa.intersection(__UpperCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str:
_a = AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase )
if tok_name == MBART_TINY:
_a = SeqaSeqDataset(
__UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
_a = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_a = SeqaSeqDataset(
__UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
_a = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        assert len(__UpperCAmelCase ) == 1 if tok_name == BART_TINY else len(__UpperCAmelCase ) == 0
| 320 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
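The crop_pct path above first resizes the shortest edge to size / crop_pct and then center-crops back to size, which is the standard ConvNeXt-style evaluation resize. A hedged sketch of the arithmetic for the usual crop_pct = 224/256:

# shortest_edge = 224, crop_pct = 224/256 -> resize shortest edge to 256, then crop 224x224
shortest_edge = 224
crop_pct = 224 / 256
resize_to = int(shortest_edge / crop_pct)
print(resize_to)  # 256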
'''simple docstring'''
def solution( limit = 1_000_000 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 35 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check ensures that the fake head request was actually called
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = val
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
return new_state_dict
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """"""
if is_panoptic:
SCREAMING_SNAKE_CASE = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :]
SCREAMING_SNAKE_CASE = in_proj_bias[:2_56]
SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :]
SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12]
SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :]
SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:]
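# The slices above split PyTorch's fused MultiHeadAttention in-projection, a single
# (3*d, d) weight matrix plus a 3*d bias, into separate query/key/value tensors. Here
# d is assumed to be 256 (the conditional DETR hidden size), hence the row ranges
# [:256], [256:512], and [-256:].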
def __lowercase ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
SCREAMING_SNAKE_CASE = """resnet101"""
if "dc5" in model_name:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = """panoptic""" in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE = 2_50
else:
SCREAMING_SNAKE_CASE = 91
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = """coco-detection-id2label.json"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# load image processor
SCREAMING_SNAKE_CASE = """coco_panoptic""" if is_panoptic else """coco_detection"""
SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=_SCREAMING_SNAKE_CASE )
# prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = encoding["""pixel_values"""]
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
SCREAMING_SNAKE_CASE = torch.hub.load("""DeppMeng/ConditionalDETR""" , _SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
SCREAMING_SNAKE_CASE = """conditional_detr.""" + src
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = rename_backbone_keys(_SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(_SCREAMING_SNAKE_CASE , is_panoptic=_SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
SCREAMING_SNAKE_CASE = state_dict.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(_SCREAMING_SNAKE_CASE ) if is_panoptic else ConditionalDetrForObjectDetection(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
model.push_to_hub(repo_id=_SCREAMING_SNAKE_CASE , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
SCREAMING_SNAKE_CASE = conditional_detr(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 296 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCamelCase : Any = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
lowerCamelCase : str = model(__magic_name__ )["""last_hidden_state"""]
lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase : Dict = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester :
def __init__( self : List[Any] , _A : str , _A : Optional[Any]=13 , _A : Optional[Any]=7 , _A : List[str]=False , _A : str=True , _A : Dict=False , _A : Optional[int]=False , _A : Union[str, Any]=19 , _A : List[str]=32 , _A : List[str]=5 , _A : str=4 , _A : Optional[Any]=37 , _A : Optional[int]="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : str=5_12 , _A : Union[str, Any]=16 , _A : Optional[Any]=2 , _A : List[Any]=0.02 , _A : Optional[int]=3 , _A : str=4 , _A : Dict=None , ) -> Dict:
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : List[Any] = use_input_mask
UpperCAmelCase_ : Dict = use_token_type_ids
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Optional[int] = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : List[str] = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Optional[int] = num_choices
UpperCAmelCase_ : Optional[Any] = scope
def A ( self : List[Any] ) -> Any:
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Optional[Any] = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : str = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_A , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def A ( self : Any , _A : List[str] , _A : List[Any] , _A : str , _A : Optional[Any] , _A : List[Any] , _A : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = EsmForProteinFolding(config=_A ).float()
model.to(_A )
model.eval()
UpperCAmelCase_ : Any = model(_A , attention_mask=_A )
UpperCAmelCase_ : List[str] = model(_A )
UpperCAmelCase_ : str = model(_A )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def A ( self : str ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
a_ = False
a_ = (EsmForProteinFolding,) if is_torch_available() else ()
a_ = ()
a_ = {} if is_torch_available() else {}
a_ = False
def A ( self : Dict ) -> str:
UpperCAmelCase_ : str = EsmFoldModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def A ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def A ( self : List[Any] ) -> Any:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
@unittest.skip('''Does not support attention outputs''' )
def A ( self : int ) -> str:
pass
@unittest.skip
def A ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def A ( self : List[Any] ) -> List[Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def A ( self : int ) -> str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def A ( self : Union[str, Any] ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : Tuple ) -> Tuple:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def A ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def A ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def A ( self : str ) -> Any:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def A ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def A ( self : List[Any] ) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def A ( self : Union[str, Any] ) -> int:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def A ( self : List[Any] ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def A ( self : int ) -> List[str]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def A ( self : Tuple ) -> str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def A ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def A ( self : str ) -> List[str]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : Tuple ) -> int:
pass
@require_torch
class snake_case__ ( __SCREAMING_SNAKE_CASE):
@slow
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
UpperCAmelCase_ : List[str] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase_ : List[Any] = model(_A )["""positions"""]
UpperCAmelCase_ : Tuple = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _A , atol=1e-4 ) )
| 304 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , cache_dir = None ):
    self.extract_dir = (
        os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
    )
    self.extractor = Extractor
def _get_output_path( self , path ):
    from .file_utils import hash_url_to_filename
    # Path where we extract compressed archives
    # We extract in the cache dir, and get the extracted path name by hashing the original path
    abs_path = os.path.abspath(path )
    return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
def _do_extract( self , output_path , force_extract ):
    return force_extract or (
        not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
    )
def extract( self , input_path , force_extract = False ):
    extractor_format = self.extractor.infer_extractor_format(input_path )
    if not extractor_format:
        return input_path
    output_path = self._get_output_path(input_path )
    if self._do_extract(output_path , force_extract ):
        self.extractor.extract(input_path , output_path , extractor_format )
    return output_path
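# Usage sketch for the manager class above (named ExtractManager upstream in the
# `datasets` library); the paths are illustrative, not from the original file:
#
#     manager = ExtractManager(cache_dir="/tmp/hf_cache")
#     extracted_path = manager.extract("/data/archive.tar.gz")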
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
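# For reference, the gzip magic number above is the two bytes 0x1F 0x8B; a minimal
# standalone sniffing helper built on the same idea (illustrative, not used elsewhere):
def _looks_like_gzip(path):
    with open(path, "rb") as f:
        return f.read(2) == b"\x1f\x8b"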
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Put the zip extractor last, since other formats (e.g. tar or gzip) can be wrongly detected as zip
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
| 287 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def UpperCAmelCase__ ( UpperCAmelCase_ : Any ) -> List[Any]:
__lowerCamelCase : List[str] = SwinConfig()
__lowerCamelCase : List[str] = swin_name.split('_' )
__lowerCamelCase : Tuple = name_split[1]
__lowerCamelCase : Union[str, Any] = int(name_split[4] )
__lowerCamelCase : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
__lowerCamelCase : int = 96
__lowerCamelCase : List[str] = (2, 2, 6, 2)
__lowerCamelCase : List[str] = (3, 6, 12, 24)
elif model_size == "small":
__lowerCamelCase : Optional[Any] = 96
__lowerCamelCase : List[str] = (2, 2, 18, 2)
__lowerCamelCase : Optional[Any] = (3, 6, 12, 24)
elif model_size == "base":
__lowerCamelCase : Any = 1_28
__lowerCamelCase : Optional[Any] = (2, 2, 18, 2)
__lowerCamelCase : Optional[Any] = (4, 8, 16, 32)
else:
__lowerCamelCase : List[Any] = 1_92
__lowerCamelCase : Any = (2, 2, 18, 2)
__lowerCamelCase : Optional[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
__lowerCamelCase : int = 2_18_41
else:
__lowerCamelCase : str = 10_00
__lowerCamelCase : int = """huggingface/label-files"""
__lowerCamelCase : Dict = """imagenet-1k-id2label.json"""
__lowerCamelCase : List[Any] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase : Optional[int] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
__lowerCamelCase : Tuple = idalabel
__lowerCamelCase : List[Any] = {v: k for k, v in idalabel.items()}
__lowerCamelCase : List[str] = img_size
__lowerCamelCase : Optional[Any] = num_classes
__lowerCamelCase : Any = embed_dim
__lowerCamelCase : Tuple = depths
__lowerCamelCase : Optional[int] = num_heads
__lowerCamelCase : str = window_size
return config
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple ) -> Any:
if "patch_embed.proj" in name:
__lowerCamelCase : List[str] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase : Any = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase : List[Any] = """encoder.""" + name
if "attn.proj" in name:
__lowerCamelCase : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowerCamelCase : Optional[Any] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowerCamelCase : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowerCamelCase : int = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowerCamelCase : Any = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowerCamelCase : List[Any] = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
__lowerCamelCase : str = """layernorm.weight"""
if name == "norm.bias":
__lowerCamelCase : List[Any] = """layernorm.bias"""
if "head" in name:
__lowerCamelCase : Union[str, Any] = name.replace('head' , 'classifier' )
else:
__lowerCamelCase : str = """swin.""" + name
return name
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ) -> Dict:
for key in orig_state_dict.copy().keys():
__lowerCamelCase : List[Any] = orig_state_dict.pop(UpperCAmelCase_ )
if "mask" in key:
continue
elif "qkv" in key:
__lowerCamelCase : Tuple = key.split('.' )
__lowerCamelCase : Tuple = int(key_split[1] )
__lowerCamelCase : Dict = int(key_split[3] )
__lowerCamelCase : Dict = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowerCamelCase : List[Any] = val[:dim, :]
__lowerCamelCase : int = val[
dim : dim * 2, :
]
__lowerCamelCase : Optional[int] = val[-dim:, :]
else:
__lowerCamelCase : Union[str, Any] = val[
:dim
]
__lowerCamelCase : Tuple = val[
dim : dim * 2
]
__lowerCamelCase : List[Any] = val[
-dim:
]
else:
__lowerCamelCase : Union[str, Any] = val
return orig_state_dict
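# The slicing above splits timm's fused qkv weight of shape (3 * dim, dim) into separate
# query/key/value tensors of dim rows each; e.g. in the tiny variant's first stage,
# dim == 96 and the fused weight has shape (288, 96).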
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str ) -> int:
__lowerCamelCase : Optional[Any] = timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
__lowerCamelCase : Union[str, Any] = get_swin_config(UpperCAmelCase_ )
__lowerCamelCase : int = SwinForImageClassification(UpperCAmelCase_ )
model.eval()
__lowerCamelCase : Optional[Any] = convert_state_dict(timm_model.state_dict() , UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
__lowerCamelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCamelCase : Tuple = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
__lowerCamelCase : List[str] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
__lowerCamelCase : str = image_processor(images=UpperCAmelCase_ , return_tensors='pt' )
__lowerCamelCase : Optional[int] = timm_model(inputs['pixel_values'] )
__lowerCamelCase : Union[str, Any] = model(**UpperCAmelCase_ ).logits
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
A__ : int = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 185 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_lowerCamelCase =5_0_0_0_0_0
_lowerCamelCase , _lowerCamelCase =os.path.split(__file__)
_lowerCamelCase =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
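# `get_duration` comes from the local `utils` module imported above; a minimal sketch of
# such a timing decorator (an assumption about its behaviour, not the actual implementation):
#
#     import functools, time
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper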
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.filter(**lowerCamelCase )
def _a ( ):
lowerCamelCase : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Any = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowerCamelCase, """dataset.arrow""" ), lowerCamelCase, num_examples=lowerCamelCase )
lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase )
def tokenize(lowerCamelCase ):
return tokenizer(examples["""text"""] )
lowerCamelCase : List[str] = map(lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""numpy""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""pandas""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : Union[str, Any] = filter(lowerCamelCase )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase, """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left(root: Node | None , level: int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
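# For the sample tree from make_tree() (1 at the root, 2 and 3 as its children, 4 and 5
# under 2), the traversals above should produce:
#     preorder(root)    -> [1, 2, 4, 5, 3]
#     inorder(root)     -> [4, 2, 5, 1, 3]
#     postorder(root)   -> [4, 5, 2, 3, 1]
#     level_order(root) -> [1, 2, 3, 4, 5]
#     zigzag(root)      -> [[1], [3, 2], [4, 5]]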
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"""In-order Traversal: {inorder(root )}""" )
    print(f"""Pre-order Traversal: {preorder(root )}""" )
    print(f"""Post-order Traversal: {postorder(root )}""" , '''\n''' )
    print(f"""Height of Tree: {height(root )}""" , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(root ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(root ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 23 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime.

    Uses the Lucas-Lehmer primality test, which is meaningful for odd
    prime exponents p.
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
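# Illustrative sanity check (not in the original snippet): 2**p - 1 is prime
# for the odd exponents 3, 5, 7 and 13 below 15, while p = 9 and p = 11 give
# composites (511 = 7 * 73, 2047 = 23 * 89), which the residue test detects.
# assert [p for p in range(3, 15, 2) if lucas_lehmer_test(p)] == [3, 5, 7, 13]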
if __name__ == "__main__":
    print(lucas_lehmer_test(7))   # True: 127 is prime
    print(lucas_lehmer_test(11))  # False: 2047 = 23 * 89
| 287 | 0 |
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` from one energy unit to another via joules."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
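# Illustrative usage (values follow directly from the table above):
#   energy_conversion("joule", "kilojoule", 1_000)  -> 1.0
#   energy_conversion("kilowatthour", "joule", 1)   -> 3_600_000.0
#   energy_conversion("joule", "horsepower", 1)     -> ValueError (unknown unit)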
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 248 |
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 287 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCamelCase = """trocr"""
__UpperCamelCase = ["""past_key_values"""]
__UpperCamelCase = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self :Dict , lowercase_ :Any=5_02_65 , lowercase_ :int=10_24 , lowercase_ :int=12 , lowercase_ :Tuple=16 , lowercase_ :int=40_96 , lowercase_ :Optional[int]="gelu" , lowercase_ :Union[str, Any]=5_12 , lowercase_ :str=0.1 , lowercase_ :Any=0.0 , lowercase_ :List[Any]=0.0 , lowercase_ :Dict=2 , lowercase_ :str=0.02 , lowercase_ :str=0.0 , lowercase_ :Any=True , lowercase_ :Optional[int]=False , lowercase_ :Union[str, Any]=True , lowercase_ :Optional[Any]=True , lowercase_ :str=1 , lowercase_ :List[Any]=0 , lowercase_ :int=2 , **lowercase_ :Union[str, Any] , ) -> Optional[int]:
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = activation_function
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = init_std
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = scale_embedding
UpperCAmelCase = use_learned_position_embeddings
UpperCAmelCase = layernorm_embedding
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
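# Illustrative usage (relies on PretrainedConfig's attribute_map indirection):
#   config = TrOCRConfig(d_model=256, decoder_layers=4)
#   config.hidden_size        -> 256  (aliased to d_model)
#   config.num_hidden_layers  -> 4    (aliased to decoder_layers)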
| 78 |
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
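# Illustrative usage, pairing the config with its ONNX export description:
#   config = SegformerConfig()
#   onnx_config = SegformerOnnxConfig(config)
#   onnx_config.inputs -> {"pixel_values": {0: "batch", 1: "num_channels",
#                                           2: "height", 3: "width"}}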
| 287 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
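# Example invocation (illustrative; the script filename is an assumption,
# argument names match the parser below):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection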
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 231 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
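# Illustrative: the default attention_types value [[["global", "local"], 12]]
# expands to 12 repetitions of the ["global", "local"] pattern, i.e. 24
# alternating layer types, which matches the default num_layers=24.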
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """
    Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as
    the original implementation uses Python variables and control flow.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 287 | 0 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes in a graph."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance (number of edges) between two nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
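# Design note (illustrative, not in the original snippet): both functions use
# list.pop(0) for the FIFO queue, which is O(n) per dequeue; a
# collections.deque with popleft() would make each dequeue O(1) while keeping
# the same BFS behaviour.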
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
| 198 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[Any] = BlenderbotSmallTokenizer
UpperCAmelCase : Dict = False
def __snake_case ( self : int):
super().setUp()
a : Optional[int] = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
a : Optional[Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase))))
a : List[str] = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
a : Tuple = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(__UpperCAmelCase) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(__UpperCAmelCase))
def __snake_case ( self : Dict , **__UpperCAmelCase : List[Any]):
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Tuple):
a : int = """adapt act apte"""
a : str = """adapt act apte"""
return input_text, output_text
def __snake_case ( self : Optional[Any]):
a : Tuple = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : List[Any] = """adapt act apte"""
a : Optional[int] = ["""adapt""", """act""", """ap@@""", """te"""]
a : str = tokenizer.tokenize(__UpperCAmelCase)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
a : Any = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
a : Tuple = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , __UpperCAmelCase)
def __snake_case ( self : Dict):
a : List[str] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
assert tok("sam").input_ids == [1384]
a : Tuple = """I am a small frog."""
a : Tuple = tok([src_text] , padding=__UpperCAmelCase , truncation=__UpperCAmelCase)["""input_ids"""]
a : Any = tok.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __snake_case ( self : List[str]):
a : Tuple = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
a : int = """I am a small frog ."""
a : Any = """."""
a : List[Any] = tok(__UpperCAmelCase)["""input_ids"""]
a : str = tok(__UpperCAmelCase)["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
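# The tests above would typically be collected by pytest, e.g. (path illustrative):
#   pytest tests/models/blenderbot_small/test_tokenization_blenderbot_small.py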
| 40 |
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
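# This script is normally exercised via the accelerate launcher, e.g.
# (invocation illustrative, script filename assumed):
#   accelerate launch test_sync.py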
| 287 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_a = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
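# Example invocation (illustrative; script filename assumed, flags match the
# parser below):
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino-vitb16 --base_model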
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )

    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
 | 320 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 0 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in `sentence` that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 35 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
            backbone_model_type = backbone_config.get("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["""pixel_values"""]
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Tuple = True ,lowerCamelCase__ : Optional[Any] = None ,lowerCamelCase__ : List[Any] = None ,lowerCamelCase__ : Optional[Any] = PILImageResampling.BILINEAR ,lowerCamelCase__ : Optional[int] = True ,lowerCamelCase__ : List[Any] = 1 / 255 ,lowerCamelCase__ : Tuple = True ,lowerCamelCase__ : List[str] = None ,lowerCamelCase__ : Any = None ,**lowerCamelCase__ : Optional[int] ,) -> int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 384}
SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
# Default value set here for backwards compatibility where the value in config is None
SCREAMING_SNAKE_CASE = crop_pct if crop_pct is not None else 224 / 256
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any ,lowerCamelCase__ : int = PILImageResampling.BICUBIC ,lowerCamelCase__ : Tuple = None ,**lowerCamelCase__ : str ,) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
SCREAMING_SNAKE_CASE = int(shortest_edge / crop_pct )
SCREAMING_SNAKE_CASE = get_resize_output_image_size(lowerCamelCase__ ,size=lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowerCamelCase__ ,size=(shortest_edge, shortest_edge) ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowerCamelCase__ ,size=(shortest_edge, shortest_edge) ,resample=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
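    # Hedged note on the branch above: below 384 px the image is first resized to
    # shortest_edge / crop_pct and then center-cropped, which appears to mirror the
    # "crop percentage" evaluation preprocessing popularized by timm (assumption).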
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str = None ,**lowerCamelCase__ : int ,) -> List[Any]:
'''simple docstring'''
return rescale(lowerCamelCase__ ,scale=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any = None ,**lowerCamelCase__ : Dict ,) -> Any:
'''simple docstring'''
return normalize(lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ,data_format=lowerCamelCase__ ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[Any] = None ,lowerCamelCase__ : int = None ,lowerCamelCase__ : Union[str, Any] = None ,lowerCamelCase__ : Any = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Any = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : List[Any] = None ,lowerCamelCase__ : Dict = ChannelDimension.FIRST ,**lowerCamelCase__ : int ,) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = crop_pct if crop_pct is not None else self.crop_pct
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(lowerCamelCase__ ,default_to_square=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=lowerCamelCase__ ,size=lowerCamelCase__ ,crop_pct=lowerCamelCase__ ,resample=lowerCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=lowerCamelCase__ ,scale=lowerCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=lowerCamelCase__ ,mean=lowerCamelCase__ ,std=lowerCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(lowerCamelCase__ ,lowerCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
| 296 |
import json
import sys
def format_json_to_md( input_json_file , output_md_file ):
    with open(input_json_file , encoding="""utf-8""" ) as f:
        results = json.load(f )
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""" , None )
            dif_val = metric_vals.get("""diff""" , None )
            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else """None"""
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("""</details>""" )
    with open(output_md_file , """w""" , encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )
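# Hedged input sketch: the JSON is expected to map benchmark names to
# {metric_name: {"new": float, "old": float | None, "diff": float | None}}.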
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 287 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase):
@slow
def A ( self : List[Any] ) -> List[str]:
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 304 |
def reverse_long_words( sentence ):
    return " ".join(
        """""".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
A__ : int = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment :
"""simple docstring"""
    framework : str
    role = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
    hyperparameters = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 1_6,
"""per_device_eval_batch_size""": 1_6,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 5_0_0,
"""save_steps""": 5_5_0_0,
}
    distributed_hyperparameters = {**hyperparameters, """max_steps""": 1_0_0_0}
@property
def lowercase_ ( self ) -> Dict:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowercase_ ( self ) -> Dict:
return f'{self.framework}-transfromers-test'
@property
def lowercase_ ( self ) -> List[str]:
return f'./tests/sagemaker/scripts/{self.framework}'
@property
def lowercase_ ( self ) -> Union[str, Any]:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> int:
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 185 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( min_version ):
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                """This example requires a source install from HuggingFace Transformers (see """
                """`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 0 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory( self , directory , identifier = None , n_identifier = None , ignore_files = None , only_modules = True , ) -> None:
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('''__init__.py''' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('''Testing''' , file )
            if only_modules:
                module_identifier = file.split('''.''' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def A ( self : List[Any] ) -> Tuple:
        directory = Path('''src/transformers''' )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def A ( self : Optional[int] ) -> int:
        directory = Path('''src/transformers''' )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def A ( self : Dict ) -> List[Any]:
        directory = Path('''src/transformers''' )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def A ( self : Tuple ) -> Union[str, Any]:
        directory = Path('''src/transformers''' )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def A ( self : Optional[Any] ) -> Dict:
        directory = Path('''docs/source''' )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 287 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : str = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict( a__):
    '''simple docstring'''
    sd = torch.load(a__ , map_location="""cpu""")
return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix):
    '''simple docstring'''
    new_d = OrderedDict()
    # recreate position_ids, which old checkpoints do not store (key name assumed)
    new_d["""visual_bert.embeddings.position_ids"""] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["""cls.predictions.decoder.bias"""] = new_d["""cls.predictions.bias"""]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path):
    '''simple docstring'''
    assert (
        checkpoint_path.split("""/""")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = """pretraining"""
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2_0_4_8}
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 1_0_2_4}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''')
    else:
        if "vcr" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 5_1_2}
            model_type = """multichoice"""
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2_0_4_8}
            model_type = """vqa_advanced"""
        elif "vqa" in checkpoint_path:
            config_params = {"""visual_embedding_dim""": 2_0_4_8, """num_labels""": 3_1_2_9}
            model_type = """vqa"""
        elif "nlvr" in checkpoint_path:
            config_params = {
                """visual_embedding_dim""": 1_0_2_4,
                """num_labels""": 2,
            }
            model_type = """nlvr"""
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict , config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
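# Hedged CLI sketch (script filename and paths are illustrative):
# python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visual_bert_vqa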
| 248 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def _a ( from_type , to_type , value ):
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
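# Hedged example: converting 1 kilojoule to joules multiplies by
# ENERGY_CONVERSION["kilojoule"] and divides by ENERGY_CONVERSION["joule"]:
# _a("kilojoule", "joule", 1)  # -> 1000.0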
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( voltage , current , resistance ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
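# Hedged example: passing resistance=0 asks the function to solve for it,
# so _lowerCAmelCase(voltage=10, current=5, resistance=0) returns {"resistance": 2.0}.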
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
_UpperCAmelCase : ClassVar[Features] = Features({"""audio""": Audio()})
_UpperCAmelCase : ClassVar[Features] = Features({"""transcription""": Value("""string""")})
_UpperCAmelCase : str = "audio"
_UpperCAmelCase : str = "transcription"
def UpperCamelCase__ ( self , __magic_name__ ):
if self.audio_column not in features:
raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , __magic_name__ ):
raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["""audio"""] = features[self.audio_column]
        task_template.__dict__["""input_schema"""] = input_schema
return task_template
@property
def UpperCamelCase__ ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 287 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def lowerCamelCase__ ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
"""simple docstring"""
    in_kaggle = False
    in_colab = False
    if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = """google.colab""" in str(sys.modules["IPython"].get_ipython() )
try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , __lowerCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type="TPU" )
        print(F"""Launching a training on {num_processes} TPU cores.""" )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
                world_size=num_processes , master_addr="127.0.01" , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type="MULTI_GPU" )
                print(F"""Launching training on {num_processes} GPUs.""" )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCAmelCase_ = """1"""
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
            function(*args )
def lowerCamelCase__ ( function , args=() , num_processes=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=num_processes , master_addr="127.0.01" , master_port="29500" , accelerate_mixed_precision="no" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="yes" , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method="fork" )
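# Hedged usage sketch: in accelerate these two functions correspond to
# notebook_launcher and debug_launcher (names assumed). A typical notebook call:
# notebook_launcher(training_function, num_processes=2)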
| 231 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
        config_dict , kwargs = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
        config_dict , kwargs = cls.get_config_dict(__magic_name__ , **__magic_name__ )
        if config_dict.get("""model_type""" ) == "bridgetower":
            config_dict = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
    def UpperCamelCase__ ( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 287 | 0 |
'''simple docstring'''
class EditDistance :
    '''simple docstring'''
    def __init__( self ) -> None:
        self.worda = """"""
        self.wordb = """"""
        self.dp = []
    def __min_dist_top_down_dp( self , m , n ) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]
    def min_dist_top_down( self , worda , wordb ) -> int:
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )
    def min_dist_bottom_up( self , worda , wordb ) -> int:
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0: # first string is empty
                    self.dp[i][j] = j
                elif j == 0: # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]: # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
    print("""****************** Testing Edit Distance DP Algorithm ******************""")
    print()
    Sa = input("""Enter the first string: """).strip()
    Sb = input("""Enter the second string: """).strip()
    print()
    print(F'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}')
    print(F'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}')
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 198 |
def solution( limit = 100_0000 ):
    primes = set(range(3, limit, 2 ) )
    primes.add(2 )
    for p in range(3, limit, 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p, limit + 1, p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
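# Hedged note: this is a multiplicative sieve over Euler's totient. Each phi[n] is
# scaled by (1 - 1/p) for every prime p dividing n, and sum(phi[2:]) counts the
# reduced proper fractions with denominator <= limit (Project Euler 72).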
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 0 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["""data_utils"""] = data_utils
sys.modules["""vocabulary"""] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file )-> None:
    '''simple docstring'''
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , "rb" ) as fp:
            corpus = pickle.load(fp , encoding="latin1" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + """/""" + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'''Building PyTorch model from configuration: {config}''' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
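# Hedged CLI sketch (script filename and paths are illustrative):
# python convert_transfo_xl_checkpoint.py --pytorch_dump_folder_path ./transfo-xl \
#     --tf_checkpoint_path ./model.ckpt --transfo_xl_config_file ./config.json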
| 40 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word ):
    return "".join(sorted(word ) )
def anagram( my_word ):
    return word_by_signature[signature(my_word )]
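# Hedged example: signature("state") == "aestt" == signature("taste"),
# so both words land in the same word_by_signature bucket.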
data = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 287 | 0 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( _lowerCAmelCase ):
    """simple docstring"""
    t = int(_lowerCAmelCase )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return f'{h}:{m:02d}:{s:02d}' if h != 0 else f'{m:02d}:{s:02d}'
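# Hedged examples: format_time(3661) -> "1:01:01"; format_time(61) -> "01:01"
# (the hour field is dropped when it is zero).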
def html_progress_bar( value , total , prefix , label , width=3_00 ):
"""simple docstring"""
return f'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '
def text_to_html_table( items ):
    """simple docstring"""
    html_code = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f' <th>{i}</th>\n'
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f'{elt:.6f}' if isinstance(elt , float ) else str(elt )
html_code += f' <td>{elt}</td>\n'
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __lowerCamelCase :
'''simple docstring'''
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix = None , leave = True , parent = None , width = 300 , ) -> None:
        self.total = total
        self.prefix = """""" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update = False , comment = None ) -> None:
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ) -> None:
        spaced_value = """ """ * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = F'[{spaced_value}/{self.total} : < :'
        elif self.predicted_remaining is None:
            self.label = F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'
        else:
            self.label = (
                F'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'
                F' {format_time(self.predicted_remaining )}'
            )
self.label += F', {1/self.average_time_per_item:.2f} it/s'
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F', {self.comment}]'
self.display()
    def display( self ) -> None:
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
else:
self.output.update(disp.HTML(self.html_code ) )
    def close( self ) -> None:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self , total , column_names=None ) -> None:
        super().__init__(total )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
def _UpperCAmelCase ( self ) -> str:
_a = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_a = disp.display(disp.HTML(self.html_code ) , display_id=__UpperCAmelCase )
else:
self.output.update(disp.HTML(self.html_code ) )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.inner_table is None:
_a = [list(values.keys() ), list(values.values() )]
else:
_a = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__UpperCAmelCase )
_a = columns
self.inner_table.append([values[c] for c in columns] )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=300 ) -> Union[str, Any]:
_a = NotebookProgressBar(__UpperCAmelCase , prefix=__UpperCAmelCase , parent=self , width=__UpperCAmelCase )
return self.child_bar
def _UpperCAmelCase ( self ) -> Tuple:
_a = None
self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    A `TrainerCallback` that displays the progress of training or evaluation, optimized for Jupyter Notebooks.
    """

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
| 320 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    r"""
    Image processor with ConvNeXT-style resizing: shortest-edge resize plus center
    crop below 384 pixels, plain warping at 384 and above.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
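# Worked example of the resize logic above (hedged): with the default
# crop_pct = 224 / 256 = 0.875 and size={"shortest_edge": 224}, the shortest edge
# is first resized to int(224 / 0.875) = 256 and a 224x224 center crop is taken;
# for size={"shortest_edge": 384} or larger the image is warped directly to
# (shortest_edge, shortest_edge) with no crop.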
| 287 | 0 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple[str, float]:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
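    # Hedged worked example of the mass-action relation n * p = n_i**2: with
    # n = 1e18 and n_i = 1.5e10 (both in cm^-3), the missing hole concentration
    # is n_i**2 / n = 225.0.
    print(carrier_concentration(electron_conc=1e18, hole_conc=0, intrinsic_conc=1.5e10))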
| 35 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check ensures we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 287 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
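# Illustrative key translation under WHISPER_MAPPING (hedged example key):
#     "decoder.blocks.0.mlp.0.weight" -> "decoder.layers.0.fc1.weight"
# i.e. the "blocks" -> "layers" and "mlp.0" -> "fc1" substitutions compose in one pass.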
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    # NOTE: the default cache directory is an assumption, added so the one-argument
    # call in convert_openai_whisper_to_tfms below stays valid.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    # NOTE: decoder_attention_heads reads "n_text_head"; "n_text_state" is the
    # hidden size, not a head count, so using it here would be a bug.
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
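    # Hedged example invocation (the script filename is a placeholder; checkpoint
    # names resolve through _MODELS above):
    #     python convert_openai_whisper.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny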
| 296 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 304 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
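# Hedged usage sketch: the concrete extractors below only declare `magic_numbers`;
# detection then reduces to comparing a file's leading bytes against them, e.g.
#     GzipExtractor.is_extractable("data.json.gz")  # reads and matches b"\x1F\x8B"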
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
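# Hedged end-to-end sketch of the two-step API that `ExtractManager.extract` uses:
#     fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#     if fmt:
#         Extractor.extract("archive.tar.gz", "/tmp/extracted", extractor_format=fmt)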
| 287 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
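# Hedged usage sketch: configs round-trip through plain dicts, which is what hub
# serialization relies on.
#     config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
#     assert BertConfig.from_dict(config.to_dict()).hidden_size == 128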
| 185 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
# Note: these intentionally shadow the builtins; they are only used inside this benchmark.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
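# Hedged usage sketch: the authenticated-user endpoint returns a JSON object whose
# keys include "login" and "id" (per the GitHub REST v3 docs linked above):
#     info = fetch_github_info(USER_TOKEN)
#     print(info.get("login"))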
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 23 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
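# The loop above computes the Lucas-Lehmer sequence s_0 = 4, s_{k+1} = s_k**2 - 2
# modulo the Mersenne number M_p = 2**p - 1; M_p is prime iff s_{p-2} == 0 (mod M_p).
# Worked example for p = 5: M_5 = 31 and the sequence runs 4 -> 14 -> 8 -> 0, so
# lucas_lehmer_test(5) is True (2**5 - 1 = 31 is prime).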
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 287 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
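# The formula implemented above is v_rms = sqrt(3 * R * T / M), with R in
# J/(K*mol) and M in kg/mol. Hedged worked example for nitrogen (N2) at
# T = 300 K, M = 0.028 kg/mol: sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s.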
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # kg/mol for nitrogen (N2); the formula expects SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 248 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 287 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 78 |
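The `__init__` above maps legacy `no_xxx` kwargs onto their positive counterparts before handing off to the parent. The inversion trick in isolation (names illustrative):

```python
DEPRECATED_ARGS = ["no_inference", "no_cuda", "no_speed"]

def normalize_flags(**kwargs):
    for deprecated_arg in DEPRECATED_ARGS:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]               # "no_cuda" -> "cuda"
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

assert normalize_flags(no_cuda=True) == {"cuda": False}
assert normalize_flags(no_inference=False, batch=8) == {"inference": True, "batch": 8}
```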
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """segformer"""
def __init__( self , __magic_name__=3 , __magic_name__=4 , __magic_name__=[2, 2, 2, 2] , __magic_name__=[8, 4, 2, 1] , __magic_name__=[3_2, 6_4, 1_6_0, 2_5_6] , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[1, 2, 5, 8] , __magic_name__=[4, 4, 4, 4] , __magic_name__="gelu" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=0.1 , __magic_name__=1e-6 , __magic_name__=2_5_6 , __magic_name__=2_5_5 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __magic_name__ , )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = depths
lowerCamelCase : List[Any] = sr_ratios
lowerCamelCase : int = hidden_sizes
lowerCamelCase : Union[str, Any] = patch_sizes
lowerCamelCase : Optional[Any] = strides
lowerCamelCase : Dict = mlp_ratios
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Any = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Dict = classifier_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = decoder_hidden_size
lowerCamelCase : str = kwargs.get("""reshape_last_stage""" , __magic_name__ )
lowerCamelCase : Dict = semantic_loss_ignore_index
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-4
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 0 |
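The ONNX config appended to the row declares one 4-D image input whose every axis is dynamic. Those axis names are exactly the shape a `dynamic_axes` mapping for `torch.onnx.export` takes; a dependency-free sketch:

```python
from collections import OrderedDict

# The input spec declared by the OnnxConfig above.
onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)

# Passed as torch.onnx.export(..., input_names=list(onnx_inputs),
# dynamic_axes=dict(onnx_inputs)), this lets the exported graph accept
# arbitrary batch sizes and image resolutions.
assert list(onnx_inputs) == ["pixel_values"]
assert onnx_inputs["pixel_values"][3] == "width"
```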
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
_A = logging.get_logger(__name__)
def lowerCamelCase__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), F"""{len(__lowerCAmelCase )} != {len(__lowerCAmelCase )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
_A = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
_A = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def lowerCamelCase__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : int ):
"""simple docstring"""
try:
lowerCAmelCase_ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
F""" {n_student}""" )
return list(range(__lowerCAmelCase ) )
def lowerCamelCase__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict ):
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowerCamelCase__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] = "student" , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
lowerCAmelCase_ = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
lowerCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F"""teacher must be a model or string got type {type(__lowerCAmelCase )}"""
lowerCAmelCase_ = teacher.config.to_diff_dict()
try:
lowerCAmelCase_ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCAmelCase_ = teacher_e
if d is None:
lowerCAmelCase_ = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
lowerCAmelCase_ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowerCAmelCase_ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCAmelCase_ = teacher_e
if d is None:
lowerCAmelCase_ = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
lowerCAmelCase_ = teacher.config_class(**__lowerCAmelCase )
lowerCAmelCase_ = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
lowerCAmelCase_ = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowerCAmelCase_ = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCAmelCase_ = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
if d_layers_to_copy is None:
lowerCAmelCase_ = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
try:
if hasattr(
__lowerCAmelCase , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
lowerCAmelCase_ = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(__lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 231 |
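The script above builds a smaller student by copying a spread of teacher layers, preferring hardcoded layouts that keep the first and last layers. The selection logic replayed for a 12-layer teacher:

```python
LAYERS_TO_COPY_FROM_12 = {
    1: [0], 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11],
    6: [0, 2, 4, 7, 9, 11], 12: list(range(12)),
}

def pick_layers_to_copy(n_student: int, n_teacher: int = 12) -> list:
    try:
        return LAYERS_TO_COPY_FROM_12[n_student]
    except KeyError:
        # same fallback as the script: default to the first n_student layers
        return list(range(n_student))

assert pick_layers_to_copy(3) == [0, 6, 11]        # keep first, middle, last
assert pick_layers_to_copy(5) == [0, 1, 2, 3, 4]   # unmapped size -> first 5
```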
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ ):
lowerCamelCase : Optional[int] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : Any = input.size()
lowerCamelCase : List[Any] = len(lowerCamelCase )
lowerCamelCase : Optional[Any] = shape[dimension]
lowerCamelCase : Optional[int] = torch.arange(0, lowerCamelCase, lowerCamelCase )
lowerCamelCase : Dict = torch.div(sizedim - size, lowerCamelCase, rounding_mode="""floor""" ) + 1
lowerCamelCase : int = torch.arange(lowerCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase : str = [slice(lowerCamelCase )] * rank
lowerCamelCase : List[str] = indices
lowerCamelCase : Dict = input[s]
lowerCamelCase : Any = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase ):
import torch
lowerCamelCase : List[Any] = torch.arange(1, lowerCamelCase )
lowerCamelCase : Optional[int] = torch.remainder(lowerCamelCase, lowerCamelCase )
lowerCamelCase : List[Any] = remainders == 0
lowerCamelCase : List[Any] = candidates[divisor_indices]
lowerCamelCase : Optional[Any] = torch.max(lowerCamelCase )
return largest_divisor, torch.div(lowerCamelCase, lowerCamelCase, rounding_mode="""floor""" )
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
| 287 | 0 |
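The static helper near the end of the config expands the compact `attention_types` spec into one entry per layer; with the defaults shown in the row, `[[["global", "local"], 12]]` must expand to exactly `num_layers = 24` entries or `__init__` raises. Standalone:

```python
def expand_attention_types(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions

layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24                       # 12 repetitions x 2 patterns
assert layers[:4] == ["global", "local", "global", "local"]
```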
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__a: str = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__a: List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 198 |
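`_LazyModule` defers the actual imports until a name is first accessed, which keeps package import cheap. A toy version of the mechanism; it only handles top-level names and caches on first touch, unlike the real class:

```python
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
        setattr(self, attr, value)  # cache: the next access skips __getattr__
        return value

lazy = ToyLazyModule("toy", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is imported only on first touch
```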
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
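The row above is pure backward compatibility: every public name that used to live in this module is re-imported from its new home, so old import paths keep resolving. The same idea for a toy module, with the stdlib standing in for the relocated code:

```python
# old_helpers.py -- kept only so legacy `from old_helpers import dumps` keeps
# working; the real implementations now live elsewhere (stdlib json here).
from json import dumps, loads  # noqa: F401  (re-exported on purpose)

__all__ = ["dumps", "loads"]
```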
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _A ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = """biogpt"""
def __init__( self : Tuple , __UpperCAmelCase : List[Any]=42384 , __UpperCAmelCase : Any=1024 , __UpperCAmelCase : Any=24 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : Dict=4096 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=1024 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Tuple=1e-12 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Tuple=1 , __UpperCAmelCase : Tuple=0 , __UpperCAmelCase : str=2 , **__UpperCAmelCase : Tuple , ):
a : Optional[Any] = vocab_size
a : int = max_position_embeddings
a : Union[str, Any] = hidden_size
a : Dict = num_hidden_layers
a : Dict = num_attention_heads
a : List[str] = intermediate_size
a : Union[str, Any] = hidden_act
a : str = hidden_dropout_prob
a : List[Any] = attention_probs_dropout_prob
a : Tuple = initializer_range
a : Any = layer_norm_eps
a : Optional[int] = scale_embedding
a : Optional[int] = use_cache
a : List[str] = layerdrop
a : int = activation_dropout
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
| 40 |
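Configs like the one above are essentially attribute bags whose values round-trip through dicts, plus serialization inherited from `PretrainedConfig`. A dataclass stand-in showing the round-trip, with a few of the BioGPT defaults from the row:

```python
from dataclasses import dataclass, asdict

@dataclass
class ToyBioGptConfig:
    vocab_size: int = 42384
    hidden_size: int = 1024
    num_hidden_layers: int = 24
    scale_embedding: bool = True

cfg = ToyBioGptConfig(hidden_size=512)       # override one default
restored = ToyBioGptConfig(**asdict(cfg))    # dict round-trip
assert restored == cfg and restored.hidden_size == 512
```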
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
def _a ( lowerCamelCase, lowerCamelCase=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda lowerCamelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 287 | 0 |
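What `accelerator.accumulate(...)` and `no_sync` are exercising above reduces to a simple recipe in plain PyTorch: divide each micro-batch loss by the accumulation factor, let `.backward()` add gradients up, and step/zero the optimizer only every k batches. A minimal sketch:

```python
import torch
import torch.nn.functional as F

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 2

for iteration in range(4):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = F.mse_loss(model(x), y) / accum_steps  # keep the gradient scale comparable
    loss.backward()                               # grads accumulate across calls
    if (iteration + 1) % accum_steps == 0:
        opt.step()                                # one step per k micro-batches
        opt.zero_grad()
```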
"""simple docstring"""
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
_a = name
_a = value
_a = weight
def __repr__( self ) -> Tuple:
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _UpperCAmelCase ( self ) -> str:
return self.value
def _UpperCAmelCase ( self ) -> Tuple:
return self.name
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.weight
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.value / self.weight
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : str, _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_a = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i], value[i], weight[i] ) )
return menu
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : int ):
"""simple docstring"""
_a = sorted(_lowerCAmelCase, key=_lowerCAmelCase, reverse=_lowerCAmelCase )
_a = []
_a = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def A_ ( ):
"""simple docstring"""
pass
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 320 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 0 |
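The metric above is a thin wrapper over scipy; calling it directly reproduces the numbers in the docstring examples:

```python
from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2), round(p, 2))  # -0.74 0.15, matching the examples above
```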
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__a = pytest.mark.integration
__a = {"comet"}
__a = importlib.util.find_spec("fairseq") is not None
__a = {"code_eval"}
__a = os.name == "nt"
__a = {"bertscore", "frugalscore", "perplexity"}
__a = importlib.util.find_spec("transformers") is not None
def __snake_case( _lowerCAmelCase ) -> Any:
@wraps(_lowerCAmelCase )
def wrapper(self , _lowerCAmelCase ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , _lowerCAmelCase )
return wrapper
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
@wraps(_lowerCAmelCase )
def wrapper(self , _lowerCAmelCase ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , _lowerCAmelCase )
return wrapper
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
@wraps(_lowerCAmelCase )
def wrapper(self , _lowerCAmelCase ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , _lowerCAmelCase )
return wrapper
def __snake_case( ) -> Dict:
snake_case__ : Union[str, Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@local
class UpperCAmelCase_ ( parameterized.TestCase ):
"""simple docstring"""
lowercase = {}
lowercase = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def lowerCamelCase ( self : Any , snake_case_ : Optional[int] ):
snake_case__ : List[Any] = """[...]"""
snake_case__ : Any = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , snake_case_ ) ).module_path )
snake_case__ : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=snake_case_ )
# check parameters
snake_case__ : Optional[int] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(snake_case_ , metric_module.__name__ ):
with self.use_local_metrics():
try:
snake_case__ : List[str] = doctest.testmod(snake_case_ , verbose=snake_case_ , raise_on_error=snake_case_ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowerCamelCase ( self : List[str] , snake_case_ : Optional[int] ):
snake_case__ : str = """[...]"""
snake_case__ : int = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , snake_case_ ) ).module_path )
# run doctest
with self.use_local_metrics():
snake_case__ : str = doctest.testmod(snake_case_ , verbose=snake_case_ , raise_on_error=snake_case_ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def lowerCamelCase ( self : Optional[int] , snake_case_ : Tuple , snake_case_ : str ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case_ ):
yield
else:
yield
@contextmanager
def lowerCamelCase ( self : str ):
def load_local_metric(snake_case_ : Optional[Any] , *snake_case_ : str , **snake_case_ : Tuple ):
return load_metric(os.path.join("""metrics""" , snake_case_ ) , *snake_case_ , **snake_case_ )
with patch("""datasets.load_metric""" ) as mock_load_metric:
snake_case__ : List[Any] = load_local_metric
yield
@classmethod
def lowerCamelCase ( cls : List[Any] , snake_case_ : Optional[Any] ):
def wrapper(snake_case_ : List[Any] ):
snake_case__ : int = contextmanager(snake_case_ )
snake_case__ : Optional[int] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def lowerCamelCase ( self : str , snake_case_ : List[str] ):
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
snake_case__ : List[str] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def __snake_case( _lowerCAmelCase ) -> Tuple:
import torch
def bert_cos_score_idf(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_lowerCAmelCase ) )
    # mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
snake_case__ : List[Any] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
def load_from_checkpoint(_lowerCAmelCase ):
class UpperCAmelCase_ :
"""simple docstring"""
def lowerCamelCase ( self : Tuple , snake_case_ : Dict , *snake_case_ : Any , **snake_case_ : int ):
assert len(snake_case_ ) == 2
snake_case__ : Tuple = [0.19, 0.92]
return scores, sum(snake_case_ ) / len(snake_case_ )
return Model()
    # mock load_from_checkpoint, which is supposed to download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
snake_case__ : Optional[int] = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
snake_case__ : Dict = load_from_checkpoint
yield
def __snake_case( ) -> Dict:
snake_case__ : int = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
snake_case__ : int = """ERROR"""
snake_case__ : List[Any] = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(_lowerCAmelCase , match=re.escape(_lowerCAmelCase ) ):
metric.compute(predictions=[] , references=[] , scheme=_lowerCAmelCase )
| 35 |
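The harness above keeps a registry mapping metric names to context managers that patch out expensive model calls during doctests. The registration mechanism in miniature:

```python
from contextlib import contextmanager

PATCHERS = {}

def register_patcher(name):
    def wrapper(func):
        PATCHERS[name] = contextmanager(func)
        return func
    return wrapper

@register_patcher("bertscore")
def fake_bertscore():
    yield "patched"  # in the real suite this would patch bert_score internals

with PATCHERS["bertscore"]() as value:
    assert value == "patched"
```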
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 0 |
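One detail of the config above is worth isolating: `to_dict()` must flatten the nested backbone config into a plain dict (and re-stamp `model_type`) before the result is JSON-serializable. A toy replay:

```python
import copy

class ToyBackbone:
    model_type = "resnet"
    def __init__(self):
        self.out_features = ["stage4"]
    def to_dict(self):
        return {"model_type": self.model_type, "out_features": self.out_features}

class ToyDetrConfig:
    model_type = "conditional_detr"
    def __init__(self):
        self.d_model = 256
        self.backbone_config = ToyBackbone()
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

d = ToyDetrConfig().to_dict()
assert d["backbone_config"] == {"model_type": "resnet", "out_features": ["stage4"]}
```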
from cva import destroyAllWindows, imread, imshow, waitKey
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = [2_55, 2_55, 2_55] - img[i][j]
return img
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE_ = imread("""image_data/lena.jpg""", 1)
# convert to its negative
SCREAMING_SNAKE_CASE_ = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 296 |
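The nested loop above negates one pixel at a time; the identical arithmetic vectorizes to a single numpy expression:

```python
import numpy as np

img = np.array([[[10, 20, 30], [0, 0, 0]]], dtype=np.uint8)  # a 1x2-pixel, 3-channel image
negative = 255 - img  # same arithmetic as the loop, applied to every pixel at once
assert (negative == np.array([[[245, 235, 225], [255, 255, 255]]])).all()
```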
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 287 | 0 |
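The report generator above grows a markdown table column by column, keeping the header, separator, and value rows in lockstep. The core assembly on a one-metric input:

```python
results = {"accuracy": {"new": 0.91, "old": 0.89, "diff": 0.02}}

title, lines, value = "| metric |", "|--------|", "| new / old (diff) |"
for metric_name in sorted(results):
    vals = results[metric_name]
    title += " " + metric_name + " |"
    lines += "---|"
    value += f" {vals['new']:f} / {vals['old']:f} ({vals['diff']:f}) |"

print("\n".join([title, lines, value]))
# | metric | accuracy |
# |--------|---|
# | new / old (diff) | 0.910000 / 0.890000 (0.020000) |
```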
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case__ ( unittest.TestCase):
@property
def A ( self : Optional[int] ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def A ( self : Optional[int] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.dummy_uncond_unet
UpperCAmelCase_ : int = KarrasVeScheduler()
UpperCAmelCase_ : str = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = pipe(num_inference_steps=2 , generator=_A , output_type='''numpy''' ).images
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=_A , output_type='''numpy''' , return_dict=_A )[0]
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class snake_case__ ( unittest.TestCase):
def A ( self : List[str] ) -> str:
UpperCAmelCase_ : Dict = """google/ncsnpp-celebahq-256"""
UpperCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained(_A )
UpperCAmelCase_ : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase_ : Tuple = KarrasVePipeline(unet=_A , scheduler=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : Any = pipe(num_inference_steps=20 , generator=_A , output_type='''numpy''' ).images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 304 |
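Both tests above depend on seeded generators making sampling deterministic; `torch.manual_seed(0)` returns the generator it seeds, which is why it can be passed straight to the pipeline. The guarantee in plain torch:

```python
import torch

a = torch.randn(2, 2, generator=torch.manual_seed(0))
b = torch.randn(2, 2, generator=torch.manual_seed(0))
assert torch.equal(a, b)  # identical seed -> bit-identical samples
```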
def _a ( lowerCamelCase ):
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
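The one-liner above reverses every word longer than four characters and leaves the rest alone. Spelled out imperatively, with the row's own sample input:

```python
def reverse_long_words(sentence: str) -> str:
    out = []
    for word in sentence.split():
        out.append(word[::-1] if len(word) > 4 else word)
    return " ".join(out)

assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
```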
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
A__ : List[Any] = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the system [matrix | vector] by Gaussian elimination with partial
    pivoting, returning the solution as a column matrix."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the coefficient matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: choose the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(data_points: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through ``data_points``, where
    data_points[i] is the value at x = i + 1 (fitted via a Vandermonde system)."""
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
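
# Minimal self-check of the helpers above (the cubic values are illustrative):
# fitting only the first two terms of u(n) = n**3 yields the line 7n - 6, whose
# first incorrect term is at n = 3 (15 instead of 27) -- the "BOP" of Problem 101.
if __name__ == "__main__":
    _line = interpolate([1, 8])  # u(1), u(2) for u(n) = n**3
    assert [_line(n) for n in (1, 2, 3)] == [1, 8, 15]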
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
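
    # Worked miniature of the contract asserted above (pad_token_id=1,
    # decoder_start_token_id=2): [[71, 82, 2, 1]] is rolled one slot to the right
    # and slot 0 is set to 2, giving [[2, 71, 82, 2]] -- one trailing pad
    # disappears and column 0 is all decoder-start tokens.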
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
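
# Condensed generation recipe from the 3B test above, kept as comments because
# the checkpoint download is multi-gigabyte:
#
#     model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     inputs = tokenizer(["Sam"], return_tensors="jax")
#     ids = model.generate(**inputs, num_beams=1, early_stopping=True, min_length=15, max_length=25)
#     print(tokenizer.batch_decode(ids, skip_special_tokens=True))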
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
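
# Usage sketch (the task name is illustrative): pair the model config with its
# ONNX config to read off the dynamic axes used at export time.
#
#     config = CamembertConfig.from_pretrained("camembert-base")
#     onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes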
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,):
'''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmall90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]

    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
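
# Condensed inference recipe from the 90M integration test above, as comments
# (the weights download is several hundred megabytes):
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
#     inputs = tokenizer(["Social anxiety\nWow, I am never shy. ..."], return_tensors="tf")
#     ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
#     print(tokenizer.batch_decode(ids.numpy(), skip_special_tokens=True)[0])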
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Convert ``value`` from one energy unit to another using ENERGY_CONVERSION.

    >>> energy_conversion("joule", "kilojoule", 1)
    0.001
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
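
# Quick hand-checkable example: 1 kilowatthour is exactly 3.6 megajoule.
if __name__ == "__main__":
    assert energy_conversion("kilowatthour", "megajoule", 1) == 3.6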
"""simple docstring"""
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding the strongly connected components of a
    directed graph ``g``, given as a list of adjacency lists.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of a strongly connected component: pop it off the stack
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build adjacency lists for ``n`` vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
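
    # Smaller worked example: the 2-cycle 0 <-> 1 plus the sink 2 reached from 1
    # yields components [[2], [1, 0]] (sinks are emitted first).
    assert [[2], [1, 0]] == tarjan(create_graph(3, [(0, 1), (1, 0), (1, 2)]))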
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
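
# Usage sketch (the column names are illustrative): the template maps a
# dataset's columns onto the canonical "audio"/"transcription" schema.
#
#     task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#     task.column_mapping  # {"file": "audio", "text": "transcription"}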
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
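
# The lazy-module pattern above keeps `import transformers` cheap: the heavy
# torch-backed symbols are only imported on first attribute access, e.g.
#
#     from transformers.models.nllb_moe import NllbMoeConfig  # no torch import yet
#     from transformers.models.nllb_moe import NllbMoeModel   # now triggers torch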
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
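
# Usage sketch: compose a full BridgeTower config from its two halves (the
# overridden values shown are illustrative).
#
#     text_cfg = BridgeTowerTextConfig(vocab_size=50265)
#     vision_cfg = BridgeTowerVisionConfig(image_size=288)
#     config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)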
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
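
# Condensed text-to-3D recipe from the slow test above, as comments (the
# weights are large and a GPU is assumed):
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64,
#                   frame_size=64, output_type="np").images[0]  # (20, 64, 64, 3)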
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit with a prime sieve; this counts the
    reduced proper fractions with denominator at most ``limit`` (Euler 72)."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
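    # Problem 72's worked example: for d <= 8 there are 21 reduced proper
    # fractions, i.e. sum(phi(d) for 2 <= d <= 8) == 21.
    assert solution(8) == 21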
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, left, right, key):
    """Smallest index in v[left+1 .. right] whose value is >= key (binary search)."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces its ceiling in tail, keeping candidate tails minimal
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the canonical sorted-letter form of ``word``."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list sharing ``my_word``'s signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, crop_pct=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
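# A minimal sketch of the crop-fraction resize used by the image processor above:
# when the target short side is below 384, the image's short side is first scaled
# to shortest_edge / crop_pct (preserving aspect ratio) and then center-cropped
# back to shortest_edge. The helper below is hypothetical, not part of the
# library; it only reproduces the size arithmetic.
def resize_then_crop_dims(height: int, width: int, shortest_edge: int, crop_pct: float) -> tuple:
    target_short = int(shortest_edge / crop_pct)  # short side before the center crop
    scale = target_short / min(height, width)
    return round(height * scale), round(width * scale)

# With the default crop_pct = 224/256 assumed above, a 480x640 image is resized to
# 256x341 before the 224x224 center crop.
assert resize_then_crop_dims(480, 640, 224, 224 / 256) == (256, 341)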
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Any , snake_case_ : List[Any] , snake_case_ : Any=13 , snake_case_ : Dict=30 , snake_case_ : Any=2 , snake_case_ : Optional[Any]=3 , snake_case_ : Dict=True , snake_case_ : Optional[int]=True , snake_case_ : Dict=32 , snake_case_ : Optional[Any]=2 , snake_case_ : Union[str, Any]=4 , snake_case_ : List[Any]=37 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Dict=10 , snake_case_ : Dict=0.02 , snake_case_ : str=3 , snake_case_ : str=None , snake_case_ : int=2 , ):
snake_case__ : Any = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : str = image_size
snake_case__ : str = patch_size
snake_case__ : Optional[int] = num_channels
snake_case__ : Union[str, Any] = is_training
snake_case__ : Optional[Any] = use_labels
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : str = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Dict = scope
snake_case__ : Tuple = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case__ : Tuple = (image_size // patch_size) ** 2
snake_case__ : List[Any] = num_patches + 2
def lowerCamelCase ( self : List[Any] ):
snake_case__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : Tuple ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase ( self : Any , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Union[str, Any] ):
snake_case__ : Dict = TFDeiTModel(config=snake_case_ )
snake_case__ : Any = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self : Dict , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Tuple ):
snake_case__ : List[Any] = TFDeiTForMaskedImageModeling(config=snake_case_ )
snake_case__ : List[str] = model(snake_case_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case__ : int = 1
snake_case__ : Optional[int] = TFDeiTForMaskedImageModeling(snake_case_ )
snake_case__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : List[str] = model(snake_case_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase ( self : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Union[str, Any] ):
snake_case__ : Dict = self.type_sequence_label_size
snake_case__ : Optional[Any] = TFDeiTForImageClassification(snake_case_ )
snake_case__ : List[str] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : str = 1
snake_case__ : List[Any] = TFDeiTForImageClassification(snake_case_ )
snake_case__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : Tuple = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self : int ):
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ : Dict = config_and_inputs
snake_case__ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
lowercase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowercase = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def lowerCamelCase ( self : List[str] ):
snake_case__ : Tuple = TFDeiTModelTester(self )
snake_case__ : List[Any] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def lowerCamelCase ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowerCamelCase ( self : Optional[int] ):
pass
def lowerCamelCase ( self : str ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Dense ) )
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Union[str, Any] = model_class(snake_case_ )
snake_case__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[Any] = [*signature.parameters.keys()]
snake_case__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def lowerCamelCase ( self : Optional[Any] , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[int]=False ):
snake_case__ : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCamelCase ( self : List[Any] ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = TFDeiTModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def __snake_case( ) -> Union[str, Any]:
snake_case__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase ( self : Optional[int] ):
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : List[Any] ):
snake_case__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
snake_case__ : Dict = self.default_image_processor
snake_case__ : List[str] = prepare_img()
snake_case__ : str = image_processor(images=snake_case_ , return_tensors="""tf""" )
# forward pass
snake_case__ : Optional[Any] = model(**snake_case_ )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : Optional[Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
| 35 |
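# The DeiT tester above derives its sequence length from the patch grid plus two
# special tokens ([CLS] and distillation). A quick sanity check of that formula,
# using the tester defaults visible above (image_size=30, patch_size=2):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 2  # + [CLS] token + distillation token
assert seq_length == 227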
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 0 |
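# The versioned-config tests above rely on files such as config.4.0.0.json being
# selected only when the running library is at least that version. A minimal
# sketch of that selection rule (the helper is hypothetical; it uses
# packaging.version the way the library does, not the library's actual resolver):
from packaging import version

def pick_config_file(available: list, current: str) -> str:
    eligible = []
    for name in available:
        v = version.parse(name.removeprefix("config.").removesuffix(".json"))
        if v <= version.parse(current):
            eligible.append((v, name))
    # Newest eligible versioned file wins; otherwise fall back to plain config.json.
    return max(eligible)[1] if eligible else "config.json"

assert pick_config_file(["config.4.0.0.json", "config.42.0.0.json"], "4.30.0") == "config.4.0.0.json"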
from math import factorial
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
SCREAMING_SNAKE_CASE = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
SCREAMING_SNAKE_CASE = float(factorial(_SCREAMING_SNAKE_CASE ) )
coefficient /= factorial(_SCREAMING_SNAKE_CASE ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
| 296 |
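# Worked out by hand for the demo above: P(X = 2) over n = 4 trials with p = 0.75
# is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb
assert comb(4, 2) * 0.75**2 * 0.25**2 == 0.2109375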
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
lowerCamelCase : Any = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
lowerCamelCase : str = model(__magic_name__ )["""last_hidden_state"""]
lowerCamelCase : Union[str, Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __magic_name__ )
# compare the actual values for a slice.
lowerCamelCase : Dict = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 287 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( A : Optional[Any]=None ) -> Union[str, Any]:
if subparsers is not None:
UpperCAmelCase_ : int = subparsers.add_parser('''test''' )
else:
UpperCAmelCase_ : int = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=A , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=A )
return parser
def __UpperCAmelCase ( A : Any ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
UpperCAmelCase_ : Optional[int] = script_name
else:
UpperCAmelCase_ : int = F"--config_file={args.config_file} {script_name}"
UpperCAmelCase_ : Union[str, Any] = ["""accelerate-launch"""] + test_args.split()
UpperCAmelCase_ : Optional[int] = execute_subprocess_async(A , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> Union[str, Any]:
UpperCAmelCase_ : str = test_command_parser()
UpperCAmelCase_ : Dict = parser.parse_args()
test_command(A )
if __name__ == "__main__":
main()
| 304 |
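# The command module above follows the argparse subparser pattern: one builder
# works both standalone and attached to a parent CLI. A self-contained sketch of
# that pattern (all names here are illustrative, not accelerate's):
import argparse

def build_test_parser(subparsers=None):
    parser = subparsers.add_parser("test") if subparsers is not None else argparse.ArgumentParser("test")
    parser.add_argument("--config_file", default=None)
    return parser

root = argparse.ArgumentParser("cli")
build_test_parser(root.add_subparsers(dest="command"))
args = root.parse_args(["test", "--config_file", "cfg.yaml"])
assert args.command == "test" and args.config_file == "cfg.yaml"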
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Keep the zip extractor last, because files in other formats (e.g. tar or gzip) can be wrongly detected as zip.
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
| 287 | 0 |
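# The extractor registry above dispatches on file "magic numbers": read the first
# few bytes and match them against known signatures. A minimal sketch of that
# sniffing idea (the signatures are copied from the classes above; the helper
# itself is hypothetical):
import io
from typing import Optional

MAGIC = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\x42\x5a\x68": "bz2"}

def sniff(stream: io.BufferedIOBase) -> Optional[str]:
    head = stream.read(max(len(m) for m in MAGIC))
    return next((fmt for magic, fmt in MAGIC.items() if head.startswith(magic)), None)

assert sniff(io.BytesIO(b"\x1f\x8b\x08rest")) == "gzip"
assert sniff(io.BytesIO(b"plain text")) is None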
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 185 |
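# Init modules like the one above gate imports behind availability checks so a
# missing optional dependency degrades to dummy objects instead of an ImportError.
# A generic sketch of that guard (importlib-based; not the library's actual
# machinery, and the fallback here is just None):
import importlib
import importlib.util

def optional_import(module_name: str, fallback=None):
    if importlib.util.find_spec(module_name) is None:
        return fallback  # stand-in used when the dependency is absent
    return importlib.import_module(module_name)

assert optional_import("json") is not None
assert optional_import("definitely_not_installed_pkg") is None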
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_lowerCamelCase =5_0_0_0_0_0
_lowerCamelCase , _lowerCamelCase =os.path.split(__file__)
_lowerCamelCase =os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.map(**lowerCamelCase )
@get_duration
def _a ( lowerCamelCase, **lowerCamelCase ):
lowerCamelCase : Optional[Any] = dataset.filter(**lowerCamelCase )
def _a ( ):
lowerCamelCase : Optional[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Any = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowerCamelCase, """dataset.arrow""" ), lowerCamelCase, num_examples=lowerCamelCase )
lowerCamelCase : Tuple = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowerCamelCase )
def tokenize(lowerCamelCase ):
return tokenizer(examples["""text"""] )
lowerCamelCase : List[str] = map(lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""numpy""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""pandas""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
lowerCamelCase : List[str] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
lowerCamelCase : Optional[int] = map(lowerCamelCase, function=lambda lowerCamelCase : None, batched=lowerCamelCase )
lowerCamelCase : int = map(lowerCamelCase, function=lowerCamelCase, batched=lowerCamelCase )
lowerCamelCase : Union[str, Any] = filter(lowerCamelCase )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase, """wb""" ) as f:
f.write(json.dumps(lowerCamelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
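# The benchmark above wraps each operation in a `get_duration` decorator imported
# from a local utils module. A plausible sketch of such a timing decorator (this
# is an assumption about its behavior, not the benchmark's actual helper):
import time
from functools import wraps

def get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed seconds
    return wrapper

@get_duration
def busy():
    sum(range(100_000))

assert busy() > 0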
'''simple docstring'''
UpperCamelCase__: Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
UpperCAmelCase : List[Any] = 0
while number:
# Slight speedup: process five digits at a time using the precomputed table.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# Two chains exist: one ends in 89 (seeding its member 58 first minimizes the
# iterations needed to classify the remaining members), the other ends in 1 and
# contains only the element 1. So 58 and 1 are seeded at the start.
# Changed the dictionary to an array to speed up the solution.
UpperCamelCase__: List[str] = [None] * 10000000
UpperCamelCase__: Union[str, Any] = True
UpperCamelCase__: Any = False
def snake_case_ ( _lowerCAmelCase : List[str] ) -> Optional[int]:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCAmelCase : Tuple = chain(next_number(_lowerCAmelCase ) )
UpperCAmelCase : Dict = number_chain
while number < 10000000:
UpperCAmelCase : Optional[int] = number_chain
number *= 10
return number_chain
def snake_case_ ( _lowerCAmelCase : Union[str, Any] = 10000000 ) -> Tuple:
for i in range(1 , _lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 23 |
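# The row above memoizes digit-square chains (Project Euler 92 style): iterating
# n -> sum of its squared digits always reaches 1 or 89. A compact, un-memoized
# sketch of the same chain for a single number:
def chain_end(n: int) -> int:
    while n not in (1, 89):
        n = sum(int(d) ** 2 for d in str(n))
    return n

assert chain_end(44) == 1   # 44 -> 32 -> 13 -> 10 -> 1
assert chain_end(85) == 89  # 85 -> 89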
def _a ( lowerCamelCase ):
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCamelCase : Any = 4
lowerCamelCase : List[str] = (1 << p) - 1
for _ in range(p - 2 ):
lowerCamelCase : List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 287 | 0 |
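# The Lucas-Lehmer test above decides primality of the Mersenne number
# M_p = 2**p - 1 for odd prime p: with s_0 = 4 and s_{k+1} = (s_k**2 - 2) mod M_p,
# M_p is prime iff s_{p-2} == 0 (p == 2 is special-cased). Cross-check against
# the small Mersenne primes:
def is_mersenne_prime(p: int) -> bool:
    if p == 2:
        return True
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

# M_11 = 2047 = 23 * 89 is composite, so 11 drops out.
assert [p for p in (2, 3, 5, 7, 11, 13) if is_mersenne_prime(p)] == [2, 3, 5, 7, 13]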
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__snake_case : List[Any] = TypeVar("""T""")
class A__(Generic[T] ):
"""simple docstring"""
def __init__( self , _lowercase = True ) -> str:
a_ : dict[T, list[T]] = {} # dictionary of lists
a_ : Dict = directed
def UpperCamelCase__ ( self , _lowercase , _lowercase ) -> str:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
self.adj_list[destination_vertex].append(_lowercase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
a_ : Any = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_lowercase )
a_ : Optional[Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
a_ : Any = [destination_vertex]
a_ : Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
a_ : List[str] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
a_ : List[str] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
a_ : Optional[Any] = [destination_vertex]
a_ : Tuple = []
return self
def __repr__( self ) -> Tuple:
return pformat(self.adj_list )
| 248 |
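# Quick usage note for the adjacency-list graph above: undirected edges are
# mirrored in both endpoints' lists, directed edges only in the source's list.
# The invariant, sketched with a bare dict instead of the class:
adj = {}

def add_undirected(u, v):
    adj.setdefault(u, []).append(v)  # mirror the edge in both lists
    adj.setdefault(v, []).append(u)

add_undirected("a", "b")
add_undirected("a", "c")
assert adj == {"a": ["b", "c"], "b": ["a"], "c": ["a"]}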
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 287 | 0 |
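# The shim above keeps a renamed class importable while warning on use. The same
# deprecation pattern in miniature (class names here are illustrative):
import warnings

class NewProcessor:
    pass

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    OldProcessor()
assert caught and issubclass(caught[0].category, FutureWarning)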
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Optional[int] ) -> Tuple:
UpperCAmelCase = 0
@slow
def UpperCAmelCase__ ( self :int ) -> int:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase_ ) , 0 )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Any:
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , config=lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowercase_ , 'vocab.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='bert' , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowercase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowercase_ , 'merges.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='gpt2' , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
@require_tokenizers
def UpperCAmelCase__ ( self :Tuple ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowercase_ , 'vocab.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='bert' )
self.assertIsInstance(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowercase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowercase_ , 'merges.txt' ) )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , tokenizer_type='gpt2' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> List[Any]:
with pytest.raises(lowercase_ ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase_ , lowercase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def UpperCAmelCase__ ( self :int ) -> str:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_ , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
UpperCAmelCase = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def UpperCAmelCase__ ( self :List[Any] ) -> List[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase = TOKENIZER_MAPPING.values()
UpperCAmelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_ )
@require_tokenizers
def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase_ ) , lowercase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , lowercase_ )
@require_tokenizers
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[int]:
UpperCAmelCase = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=lowercase_ )
UpperCAmelCase = """Hello, world. How are you?"""
UpperCAmelCase = tokenizer.tokenize(lowercase_ )
self.assertEqual('[UNK]' , tokens[0] )
UpperCAmelCase = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=lowercase_ )
UpperCAmelCase = tokenizer.tokenize(lowercase_ )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def UpperCAmelCase__ ( self :int ) -> Dict:
UpperCAmelCase = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowercase_ ) , lowercase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def UpperCAmelCase__ ( self :str ) -> Optional[Any]:
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def UpperCAmelCase__ ( self :Tuple ) -> List[str]:
UpperCAmelCase = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase = get_tokenizer_config('bert-base-cased' )
UpperCAmelCase = config.pop('_commit_hash' , lowercase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_ , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase = get_tokenizer_config(lowercase_ )
self.assertDictEqual(lowercase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = get_tokenizer_config(lowercase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def UpperCAmelCase__ ( self :List[str] ) -> Tuple:
try:
AutoConfig.register('custom' , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
UpperCAmelCase = CustomTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCAmelCase__ ( self :Tuple ) -> Dict:
try:
AutoConfig.register('custom' , lowercase_ )
# Can register in two steps
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_ , slow_tokenizer_class=lowercase_ , fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = BertTokenizerFast.from_pretrained(lowercase_ )
bert_tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self :int ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
class A_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCamelCase = False
class A_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCamelCase = NewTokenizer
__UpperCamelCase = False
try:
AutoConfig.register('custom' , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoTokenizer.register(lowercase_ , fast_tokenizer_class=lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self :Any ) -> Optional[Any]:
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
UpperCAmelCase = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowercase_ , use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def UpperCAmelCase__ ( self :Any ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase = AutoTokenizer.from_pretrained('bert-base' )
def UpperCAmelCase__ ( self :List[Any] ) -> Union[str, Any]:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase__ ( self :str ) -> Dict:
# Make sure we have cached the tokenizer.
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 78 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """segformer"""
def __init__( self , __magic_name__=3 , __magic_name__=4 , __magic_name__=[2, 2, 2, 2] , __magic_name__=[8, 4, 2, 1] , __magic_name__=[3_2, 6_4, 1_6_0, 2_5_6] , __magic_name__=[7, 3, 3, 3] , __magic_name__=[4, 2, 2, 2] , __magic_name__=[1, 2, 5, 8] , __magic_name__=[4, 4, 4, 4] , __magic_name__="gelu" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=0.1 , __magic_name__=1e-6 , __magic_name__=2_5_6 , __magic_name__=2_5_5 , **__magic_name__ , ):
super().__init__(**__magic_name__ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __magic_name__ , )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = depths
lowerCamelCase : List[Any] = sr_ratios
lowerCamelCase : int = hidden_sizes
lowerCamelCase : Union[str, Any] = patch_sizes
lowerCamelCase : Optional[Any] = strides
lowerCamelCase : Dict = mlp_ratios
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Any = hidden_act
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : Dict = classifier_dropout_prob
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = decoder_hidden_size
lowerCamelCase : str = kwargs.get("""reshape_last_stage""" , __magic_name__ )
lowerCamelCase : Dict = semantic_loss_ignore_index
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-4
@property
def UpperCamelCase__ ( self ):
return 1_2
| 287 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8_660_254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( initial_vectors: list , steps: int ):
    """Apply `iteration_step` to the vector list `steps` times."""
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step( vectors: list ):
    """Replace every segment of the polyline with the four-segment Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
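# A quick sanity check, derived from the function above rather than taken from the
# original file: each pass turns every segment into 4 shorter ones, so a single
# segment (2 points) comes back as 5 points.
#     assert len(iteration_step([VECTOR_1, VECTOR_3])) == 5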
def rotate( vector: numpy.ndarray , angle_in_degrees: float ):
    """Rotate a 2D vector counterclockwise by `angle_in_degrees`."""
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot( vectors: list ):
    """Plot the polyline described by `vectors`."""
    axes = plt.gca()
    axes.set_aspect("equal" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 231 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = """gpt_neo"""
_UpperCAmelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCAmelCase : List[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , __magic_name__=5_0_2_5_7 , __magic_name__=2_0_4_8 , __magic_name__=2_0_4_8 , __magic_name__=2_4 , __magic_name__=[[["global", "local"], 1_2]] , __magic_name__=1_6 , __magic_name__=None , __magic_name__=2_5_6 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1e-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=5_0_2_5_6 , __magic_name__=5_0_2_5_6 , **__magic_name__ , ):
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_layers
lowerCamelCase : str = num_heads
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : List[Any] = window_size
lowerCamelCase : int = activation_function
lowerCamelCase : Union[str, Any] = resid_dropout
lowerCamelCase : List[Any] = embed_dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Dict = classifier_dropout
lowerCamelCase : Any = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Dict = use_cache
lowerCamelCase : Optional[Any] = bos_token_id
lowerCamelCase : int = eos_token_id
lowerCamelCase : List[Any] = attention_types
lowerCamelCase : Optional[Any] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
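# Worked example, using the default values from __init__ above for illustration:
# expand_attention_types_params([[["global", "local"], 12]]) returns
# ["global", "local", "global", "local", ...] with 24 entries, one per layer,
# which is why `len(config.attention_layers)` must equal `config.num_layers`.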
def custom_unfold( input, dimension, size, step ):
    # Sliding-window extraction along `dimension` (an unfold-style helper for
    # ONNX export); internal names are reconstructed from their use sites below.
    import torch

    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
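# For illustration (hypothetical shapes, not from the source): on a tensor of
# shape (2, 8) with dimension=1, size=4, step=2, the helper above behaves like
# torch.Tensor.unfold(1, 4, 2) and returns shape (2, 3, 4) — three windows of
# length 4 starting at offsets 0, 2, and 4.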
def custom_get_block_length_and_num_windows( seq_length, window_size ):
    # Find the largest divisor of `seq_length` smaller than `window_size` and the
    # resulting number of windows; names reconstructed from their use sites.
    import torch

    candidates = torch.arange(1, window_size )
    remainders = torch.remainder(seq_length, candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""" )
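# For illustration (hypothetical numbers, not from the source): with
# seq_length=10 and window_size=4, the candidates are [1, 2, 3], the divisors of
# 10 among them are [1, 2], so the function returns (2, 5): block length 2 and
# 5 windows.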
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
lowerCamelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase : int = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase : Tuple = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase__ ( self ):
return self._config.num_heads
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase : Optional[int] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
        # We need to order the inputs in the way they appear in the forward()
lowerCamelCase : int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase , lowerCamelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase : Optional[int] = seqlen + 2
lowerCamelCase : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase : str = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase : str = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase : Any = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ):
return 1_3
| 287 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ) -> Any:
lowercase__ : Union[str, Any] = parent
lowercase__ : List[str] = 13
lowercase__ : Union[str, Any] = 7
lowercase__ : int = True
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : List[Any] = True
lowercase__ : Union[str, Any] = 99
lowercase__ : List[str] = 32
lowercase__ : Optional[Any] = 2
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 37
lowercase__ : Union[str, Any] = """gelu"""
lowercase__ : str = 0.1
lowercase__ : List[str] = 0.1
lowercase__ : str = 512
lowercase__ : int = 16
lowercase__ : List[str] = 2
lowercase__ : Optional[Any] = 0.0_2
lowercase__ : Optional[Any] = 3
lowercase__ : List[Any] = 4
lowercase__ : Dict = None
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Optional[int] = None
if self.use_input_mask:
lowercase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Optional[Any] = None
if self.use_token_type_ids:
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Optional[Any] = None
lowercase__ : Any = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Dict = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
lowercase__ : Union[str, Any] = TFRoFormerModel(config=__lowerCAmelCase )
lowercase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase__ : int = [input_ids, input_mask]
lowercase__ : Any = model(__lowerCAmelCase )
lowercase__ : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : str = True
lowercase__ : Tuple = TFRoFormerForCausalLM(config=__lowerCAmelCase )
lowercase__ : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase__ : Tuple = model(__lowerCAmelCase )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[int] = TFRoFormerForMaskedLM(config=__lowerCAmelCase )
lowercase__ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase__ : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
lowercase__ : Dict = self.num_labels
lowercase__ : Optional[int] = TFRoFormerForSequenceClassification(config=__lowerCAmelCase )
lowercase__ : str = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase__ : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
lowercase__ : Any = self.num_choices
lowercase__ : str = TFRoFormerForMultipleChoice(config=__lowerCAmelCase )
lowercase__ : List[str] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Dict = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : int = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase__ : Optional[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : List[Any] = self.num_labels
lowercase__ : List[str] = TFRoFormerForTokenClassification(config=__lowerCAmelCase )
lowercase__ : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase__ : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
lowercase__ : str = TFRoFormerForQuestionAnswering(config=__lowerCAmelCase )
lowercase__ : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase__ : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : str = TFRoFormerModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def _lowerCAmelCase( self ) -> Tuple:
self.config_tester.run_common_tests()
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Any:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Tuple = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(__lowerCAmelCase )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Any = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase__ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__ : Optional[Any] = model(__lowerCAmelCase )[0]
# TODO Replace vocab size
lowercase__ : Optional[Any] = 50000
lowercase__ : Union[str, Any] = [1, 6, vocab_size]
self.assertEqual(output.shape , __lowerCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase__ : Union[str, Any] = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1E-4 )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1e-4
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : List[str] = tf.constant([[4, 10]] )
lowercase__ : Optional[int] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase__ : Optional[Any] = emba(input_ids.shape )
lowercase__ : Tuple = tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
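        # The expected values appear to follow the standard sinusoidal scheme:
        # for position `pos`, the first d/2 dims are sin(pos / 10000^(2i/d)) and
        # the last d/2 dims are the matching cos terms — e.g. sin(1) ≈ 0.8415 and
        # cos(1) ≈ 0.5403 in the row for position 1 above.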
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase__ : Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase__ : List[str] = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCAmelCase , __lowerCAmelCase , atol=self.tolerance )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1e-4
def _lowerCAmelCase( self ) -> Optional[Any]:
# 2,12,16,64
lowercase__ : Union[str, Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase__ : Dict = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase__ : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase__ : List[str] = embed_positions([2, 16, 768] )[None, None, :, :]
lowercase__ : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Union[str, Any] = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase__ : str = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __lowerCAmelCase , atol=self.tolerance )
| 198 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str=13 , __UpperCAmelCase : str=7 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : List[Any]=99 , __UpperCAmelCase : str=32 , __UpperCAmelCase : Optional[int]=5 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Union[str, Any]=512 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Any=4 , ):
a : Any = parent
a : Optional[Any] = batch_size
a : str = seq_length
a : str = is_training
a : str = use_attention_mask
a : Any = use_token_type_ids
a : Dict = use_labels
a : str = vocab_size
a : Any = hidden_size
a : Union[str, Any] = num_hidden_layers
a : Optional[Any] = num_attention_heads
a : Tuple = intermediate_size
a : Any = hidden_act
a : List[Any] = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : int = max_position_embeddings
a : str = type_vocab_size
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : str = num_choices
def __snake_case ( self : Optional[Any]):
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Optional[int] = None
if self.use_attention_mask:
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : str = None
if self.use_token_type_ids:
a : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a : int = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __snake_case ( self : Optional[Any]):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __snake_case ( self : str):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _A ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : int = True
UpperCAmelCase : Any = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __snake_case ( self : int):
a : Optional[Any] = FlaxRobertaModelTester(self)
@slow
def __snake_case ( self : Tuple):
for model_class_name in self.all_model_classes:
a : Tuple = model_class_name.from_pretrained("roberta-base" , from_pt=__UpperCAmelCase)
a : Optional[Any] = model(np.ones((1, 1)))
self.assertIsNotNone(__UpperCAmelCase)
| 40 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=True ):
model.train()
lowerCamelCase : Dict = model(lowerCamelCase )
lowerCamelCase : Any = F.mse_loss(lowerCamelCase, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCamelCase )
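# Typical end-user pattern that the tests below exercise (a sketch with
# illustrative names, assuming model/optimizer/dataloader were passed through
# accelerator.prepare):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(batch["x"]), batch["y"])
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()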
def _a ( lowerCamelCase, lowerCamelCase=False ):
set_seed(42 )
lowerCamelCase : Tuple = RegressionModel()
lowerCamelCase : Any = deepcopy(lowerCamelCase )
lowerCamelCase : Any = RegressionDataset(length=80 )
lowerCamelCase : Dict = DataLoader(lowerCamelCase, batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCamelCase : int = AdamW(params=model.parameters(), lr=1e-3 )
lowerCamelCase : Optional[Any] = AdamW(params=ddp_model.parameters(), lr=1e-3 )
        lowerCamelCase : str = LambdaLR(lowerCamelCase, lr_lambda=lambda epoch : epoch**0.6_5 )
        lowerCamelCase : Tuple = LambdaLR(lowerCamelCase, lr_lambda=lambda epoch : epoch**0.6_5 )
# Make a copy of `model`
if sched:
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase , lowerCamelCase : List[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( lowerCamelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : int = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : List[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase ):
# Test on distributed setup that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : int = get_training_setup(lowerCamelCase )
# Use a single batch
lowerCamelCase , lowerCamelCase : Union[str, Any] = next(iter(lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : str = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
# Sync grads
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Optional[Any] = ddp_input[torch.randperm(len(lowerCamelCase ) )]
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : Any = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = get_training_setup(lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : str = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCamelCase : Any = ddp_input[torch.randperm(len(lowerCamelCase ) )]
GradientState._reset_state()
def _a ( lowerCamelCase=False, lowerCamelCase=False ):
lowerCamelCase : List[Any] = Accelerator(
split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_training_setup(lowerCamelCase, lowerCamelCase )
for iteration, batch in enumerate(lowerCamelCase ):
lowerCamelCase , lowerCamelCase : Union[str, Any] = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.gather((ddp_input, ddp_target) )
lowerCamelCase , lowerCamelCase : Tuple = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCamelCase ):
step_model(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowerCamelCase : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _a ( ):
lowerCamelCase : int = Accelerator()
lowerCamelCase : Optional[Any] = RegressionDataset(length=80 )
lowerCamelCase : List[str] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase : int = RegressionDataset(length=96 )
lowerCamelCase : Optional[int] = DataLoader(lowerCamelCase, batch_size=16 )
lowerCamelCase , lowerCamelCase : Optional[Any] = accelerator.prepare(lowerCamelCase, lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if iteration < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase )
if batch_num < len(lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ):
lowerCamelCase : List[Any] = Accelerator()
lowerCamelCase : int = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(lowerCamelCase, lowerCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase, lowerCamelCase )
def _a ( lowerCamelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 287 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''spiece.model'''}
__snake_case = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
__snake_case = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] | 320 |
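A hypothetical round-trip through the underlying SentencePiece model the tokenizer above wraps; "spiece.model" stands in for any trained SentencePiece model file:

import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spiece.model")
pieces = sp.encode("Big Bird handles long sequences.", out_type=str)  # what _tokenize returns
ids = [sp.piece_to_id(piece) for piece in pieces]                     # what _convert_token_to_id does
print(sp.decode(pieces))  # typically round-trips back to the original text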
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 287 | 0 |
'''simple docstring'''
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    # Base case must be length <= 1: with "<= 2", a two-character
    # non-palindrome such as "ab" would wrongly be accepted.
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
| 35 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
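A short usage sketch for the config class above (the class is exported under this name upstream; the printed defaults come straight from the signature):

from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig()               # ResNet-50 backbone, 300 queries by default
print(config.num_queries, config.d_model)      # 300 256
print(config.to_dict()["model_type"])          # conditional_detr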
| 287 | 0 |
from manim import *
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 ,width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 )
SCREAMING_SNAKE_CASE = Text("""CPU""" ,font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0.5 ,aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 )
SCREAMING_SNAKE_CASE = Text("""GPU""" ,font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0.5 ,aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ ,lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0 )
SCREAMING_SNAKE_CASE = Text("""Model""" ,font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCamelCase__ ,lowerCamelCase__ ).arrange(lowerCamelCase__ ,buff=0.5 ,aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ ,run_time=1 ) ,Create(lowerCamelCase__ ,run_time=1 ) ,Create(lowerCamelCase__ ,run_time=1 ) ,)
SCREAMING_SNAKE_CASE = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.""" ,font_size=24 ,)
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ,run_time=2.5 ) ,Write(lowerCamelCase__ ) ,Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ ,opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE = 0.46 / 4
SCREAMING_SNAKE_CASE = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=lowerCamelCase__ ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=lowerCamelCase__ ,buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ ,run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 296 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
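A hypothetical end-to-end run showing the JSON shape the function expects, {benchmark: {metric: {"new": ..., "old": ..., "diff": ...}}}; the file names are made up for illustration:

import tempfile

results = {"benchmarks/benchmark_load.json": {"load_time": {"new": 1.23, "old": 1.50, "diff": -0.27}}}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(results, tmp)
format_json_to_md(tmp.name, "benchmark_report.md")  # writes a <details> block containing one markdown table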
| 287 | 0 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt: each character code p gets a fresh random key k and cipher value (p + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: p = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
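Why decryption inverts encryption: each character code p is stored as c = (p + k) * k = p*k + k**2, so p = (c - k**2) / k, which is exactly what decrypt computes. A quick check:

cipher, key = Onepad.encrypt("attack at dawn")
assert Onepad.decrypt(cipher, key) == "attack at dawn"  # the key list must travel with the ciphertext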
| 304 |
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    >>> reverse_long_words("nohtyP is nohtyP")
    'Python is Python'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
'''simple docstring'''
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 185 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.")
| 287 | 0 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """
    >>> lucas_lehmer_test(p=7)
    True
    >>> lucas_lehmer_test(p=11)
    False
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
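The function implements the Lucas-Lehmer sequence s_0 = 4, s_k = s_{k-1}^2 - 2 (mod M_p): the Mersenne number M_p = 2^p - 1 is prime exactly when s_{p-2} is 0. A quick scan over small exponents:

for p in (3, 5, 7, 11, 13):
    print(p, lucas_lehmer_test(p))  # True, True, True, False (2047 = 23 * 89), True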
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 287 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
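A hypothetical alignment call, assuming the class above is exposed as AutomaticSpeechRecognition and that Features/Audio/Value come from the top-level `datasets` package:

from datasets import Audio, Features, Value

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
aligned = AutomaticSpeechRecognition().align_with_features(features)
print(aligned.input_schema["audio"].sampling_rate)  # 16000, the dataset's actual Audio feature is swapped in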
| 248 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
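Example conversions; the expected values follow directly from the table above:

print(energy_conversion("kilowatthour", "joule", 1))            # 3600000.0
print(energy_conversion("joule", "kilocalorie_nutr", 4186800))  # 1.0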
| 287 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def _lowerCAmelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset("xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        else:
            train_dataset = load_dataset("xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset("xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset("xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes)
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True)
if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset")
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset")
if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset")
# Get the metric function
    metric = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
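A typical invocation of this script, hedged from the upstream example's README (flags beyond the dataclasses above are standard TrainingArguments):

#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli/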
| 78 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 287 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1_024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1_024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
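For intuition, a small sketch of the quantization ImageGPT's processor performs: every normalized RGB pixel is mapped to the index of its nearest color cluster, turning an image into a sequence of integer tokens. The two clusters below are rounded from the test fixture above:

import numpy as np

clusters = np.array([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])
pixels = np.array([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)  # squared L2 to each cluster
tokens = distances.argmin(axis=1)
print(tokens)  # [0 1]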
| 231 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # For the vision sub-config, pull the nested vision_config from a full BridgeTower config.
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
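# A minimal usage sketch (added; not part of the original module):
#   config = BridgeTowerConfig()  # builds the text/vision sub-configs from defaults
#   assert config.to_dict()["model_type"] == "bridgetower"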
| 287 | 0 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])
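    # Note (added for clarity): verify_out_features_out_indices only validates and
    # raises; filling in missing values is handled by the aligned getter tested above.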
    def test_backbone_mixin(self):
        backbone = BackboneMixin()
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 198 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit using a sieve."""
    # Sieve of Eratosthenes over the odd numbers, plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over the primes p dividing n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
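# Sanity check (added; not part of the original script): the totients of 2..8 sum to
# 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, so solution(8) == 21, matching the number of
# reduced proper fractions with denominator <= 8 in Project Euler problem 72.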
if __name__ == "__main__":
print(f'''{solution() = }''')
| 287 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, optionally keeping only `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
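# Note (added for clarity): Reddit rate-limits anonymous JSON requests; an HTTP 429
# surfaces here as requests.HTTPError, so callers should wait and retry (see below).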
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 40 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a sorted-letter signature; anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list that is an anagram of my_word."""
    return word_by_signature[signature(my_word)]
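# Example (added for clarity): signature("silent") == signature("listen") == "eilnst",
# so both words land in the same word_by_signature bucket built below.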
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 287 | 0 |
"""simple docstring"""
import math
def solution(n: int = 1_00) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
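# Worked example (added for clarity): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 = 3_025, so solution(10) == 3_025 - 385 == 2_640.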
if __name__ == "__main__":
    print(f'{solution() = }')
| 320 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase =logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = ["""pixel_values"""]
def __init__( self , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = PILImageResampling.BILINEAR , __magic_name__ = True , __magic_name__ = 1 / 2_5_5 , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = size if size is not None else {"""shortest_edge""": 3_8_4}
lowerCamelCase : Tuple = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Dict = do_resize
lowerCamelCase : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCamelCase : Any = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
lowerCamelCase : Union[str, Any] = resample
lowerCamelCase : str = do_rescale
lowerCamelCase : Union[str, Any] = rescale_factor
lowerCamelCase : Tuple = do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = PILImageResampling.BICUBIC , __magic_name__ = None , **__magic_name__ , ):
lowerCamelCase : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
lowerCamelCase : str = size["""shortest_edge"""]
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCamelCase : List[str] = int(shortest_edge / crop_pct )
lowerCamelCase : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : Optional[int] = resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__magic_name__ , size=(shortest_edge, shortest_edge) , data_format=__magic_name__ , **__magic_name__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__magic_name__ , size=(shortest_edge, shortest_edge) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , **__magic_name__ , ):
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = ChannelDimension.FIRST , **__magic_name__ , ):
lowerCamelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCamelCase : Optional[Any] = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
lowerCamelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCamelCase : Dict = size if size is not None else self.size
lowerCamelCase : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCamelCase : List[str] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase : Optional[Any] = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCamelCase : List[Any] = [self.resize(image=__magic_name__ , size=__magic_name__ , crop_pct=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_rescale:
lowerCamelCase : Union[str, Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCamelCase : List[Any] = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCamelCase : Optional[int] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCamelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 287 | 0 |
"""Gaussian filter implemented with the im2col trick."""
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
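# Note (added for clarity): the window slides without padding, so an H x W input
# yields an (H - k_size + 1) x (W - k_size + 1) output.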
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 35 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check ensures we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""bert-base-cased""" )
lowerCamelCase : Optional[Any] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__magic_name__ )
lowerCamelCase : str = 2
json.dump(configuration.to_dict() , open(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase : Any = ["""config.42.0.0.json"""]
lowerCamelCase : Optional[Any] = 7_6_8
configuration.save_pretrained(__magic_name__ )
shutil.move(os.path.join(__magic_name__ , """config.4.0.0.json""" ) , os.path.join(__magic_name__ , """config.42.0.0.json""" ) )
lowerCamelCase : int = AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 287 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
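# Usage note (added for clarity): the shim stays fully functional; constructing
# VideoMAEFeatureExtractor() behaves exactly like VideoMAEImageProcessor() apart
# from the FutureWarning above.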
| 296 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
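        # Note (added for clarity): checking only a 3 x 3 slice of the hidden states
        # keeps this integration test fast while still catching regressions in the
        # pretrained weights or the TF port.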
| 287 | 0 |
"""Tests for saving and restoring training state with Accelerator checkpointing."""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build train/valid loaders for the synthetic regression task y = a * x + b + noise."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
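# Note (added for clarity): the random values collected after each epoch fingerprint
# the RNG stream; if they match across a save/load cycle, the checkpoint restored
# random state as well as model and optimizer state.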
class DummyModel(nn.Module):
    """Simple model y = a * x + b with two scalar parameters."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[int] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : List[Any] = dummy_dataloaders()
UpperCAmelCase_ : int = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A )
# Train baseline
UpperCAmelCase_ : int = Accelerator(project_config=_A )
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A ( self : Dict ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : List[Any] = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ : List[Any] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
UpperCAmelCase_ : int = os.path.join(_A , '''initial''' )
accelerator.save_state(_A )
(UpperCAmelCase_) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : str = train(3 , _A , _A , _A , _A )
(UpperCAmelCase_) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Any = dummy_dataloaders()
UpperCAmelCase_ : List[Any] = Accelerator()
UpperCAmelCase_ : List[str] = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(_A )
(UpperCAmelCase_) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : int = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
UpperCAmelCase_ : Tuple = train(2 , _A , _A , _A , _A )
# Save everything
UpperCAmelCase_ : List[Any] = os.path.join(_A , '''checkpoint''' )
accelerator.save_state(_A )
# Load everything back in and make sure all states work
accelerator.load_state(_A )
test_rands += train(1 , _A , _A , _A , _A )
(UpperCAmelCase_) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : str = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def A ( self : Any ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_A , project_config=_A )
UpperCAmelCase_ : Tuple = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
(UpperCAmelCase_) : Dict = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Tuple = train(3 , _A , _A , _A , _A )
(UpperCAmelCase_) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : List[Any] = dummy_dataloaders()
UpperCAmelCase_ : int = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A )
UpperCAmelCase_ : str = Accelerator(project_dir=_A , project_config=_A )
UpperCAmelCase_ : List[str] = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
(UpperCAmelCase_) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : List[str] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
UpperCAmelCase_ : Optional[Any] = train(2 , _A , _A , _A , _A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _A , _A , _A , _A )
(UpperCAmelCase_) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def A ( self : Any ) -> Any:
UpperCAmelCase_ : List[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : int = Accelerator()
with self.assertRaises(_A ) as ve:
accelerator.register_for_checkpointing(_A , _A , _A , _A )
UpperCAmelCase_ : Dict = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A ( self : str ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
UpperCAmelCase_ : Tuple = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 )
UpperCAmelCase_ : List[Any] = dummy_dataloaders()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
UpperCAmelCase_ : Optional[Any] = Accelerator(project_dir=_A , project_config=_A )
UpperCAmelCase_ : int = accelerator.prepare(
_A , _A , _A , _A , _A )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _A , _A , _A , _A , _A )
self.assertNotEqual(_A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_A , scheduler.state_dict() )
def A ( self : List[str] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase_ : List[Any] = DummyModel()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[Any] = Accelerator(project_dir=_A , project_config=_A )
UpperCAmelCase_ : Optional[int] = accelerator.prepare(_A )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A ( self : Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = ["""torchrun""", F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 304 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
_lowerCamelCase =get_logger(__name__)
class A__ :
def __init__( self , __magic_name__ = None ):
lowerCamelCase : Dict = (
os.path.join(__magic_name__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowerCamelCase : List[str] = Extractor
def UpperCamelCase__ ( self , __magic_name__ ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
lowerCamelCase : int = os.path.abspath(__magic_name__ )
return os.path.join(self.extract_dir , hash_url_to_filename(__magic_name__ ) )
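        # Note (added for clarity): hashing the original path gives each archive a
        # stable extraction directory under the cache, so repeated loads can reuse
        # already-extracted files instead of extracting again.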
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ ):
return force_extract or (
not os.path.isfile(__magic_name__ ) and not (os.path.isdir(__magic_name__ ) and os.listdir(__magic_name__ ))
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ = False ):
lowerCamelCase : Union[str, Any] = self.extractor.infer_extractor_format(__magic_name__ )
if not extractor_format:
return input_path
lowerCamelCase : int = self._get_output_path(__magic_name__ )
if self._do_extract(__magic_name__ , __magic_name__ ):
self.extractor.extract(__magic_name__ , __magic_name__ , __magic_name__ )
return output_path
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
...
class A__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with open(__magic_name__ , """rb""" ) as f:
return f.read(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if not magic_number:
lowerCamelCase : Optional[Any] = max(len(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
try:
lowerCamelCase : Tuple = cls.read_magic_number(__magic_name__ , __magic_name__ )
except OSError:
return False
return any(magic_number.startswith(__magic_name__ ) for cls_magic_number in cls.magic_numbers )
class A__ ( __SCREAMING_SNAKE_CASE):
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
return tarfile.is_tarfile(__magic_name__ )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
def resolved(__magic_name__ ) -> str:
return os.path.realpath(os.path.abspath(__magic_name__ ) )
def badpath(__magic_name__ , __magic_name__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__magic_name__ , __magic_name__ ) ).startswith(__magic_name__ )
def badlink(__magic_name__ , __magic_name__ ) -> bool:
# Links are interpreted relative to the directory containing the link
lowerCamelCase : List[str] = resolved(os.path.join(__magic_name__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=__magic_name__ )
lowerCamelCase : Optional[Any] = resolved(__magic_name__ )
for finfo in members:
if badpath(finfo.name , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(__magic_name__ , __magic_name__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Dict = tarfile.open(__magic_name__ )
tar_file.extractall(__magic_name__ , members=TarExtractor.safemembers(__magic_name__ , __magic_name__ ) )
tar_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with gzip.open(__magic_name__ , """rb""" ) as gzip_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = b"" ):
if super().is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__magic_name__ , """rb""" ) as fp:
lowerCamelCase : List[str] = _EndRecData(__magic_name__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowerCamelCase : List[Any] = fp.read(__magic_name__ ) # CD is where we expect it to be
if len(__magic_name__ ) == sizeCentralDir:
lowerCamelCase : str = struct.unpack(__magic_name__ , __magic_name__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with zipfile.ZipFile(__magic_name__ , """r""" ) as zip_file:
zip_file.extractall(__magic_name__ )
zip_file.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with lzma.open(__magic_name__ ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
lowerCamelCase : Union[str, Any] = rarfile.RarFile(__magic_name__ )
rf.extractall(__magic_name__ )
rf.close()
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowerCamelCase : int = zstd.ZstdDecompressor()
with open(__magic_name__ , """rb""" ) as ifh, open(__magic_name__ , """wb""" ) as ofh:
dctx.copy_stream(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
with bza.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
with pyazr.SevenZipFile(__magic_name__ , """r""" ) as archive:
archive.extractall(__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(__magic_name__ , """rb""" ) as compressed_file:
with open(__magic_name__ , """wb""" ) as extracted_file:
shutil.copyfileobj(__magic_name__ , __magic_name__ )
class A__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
_UpperCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase__ ( cls ):
return max(
len(__magic_name__ )
for extractor in cls.extractors.values()
if issubclass(__magic_name__ , __magic_name__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
try:
return MagicNumberBaseExtractor.read_magic_number(__magic_name__ , magic_number_length=__magic_name__ )
except OSError:
return b""
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ = False ):
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = cls.infer_extractor_format(__magic_name__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ ): # <Added version="2.4.0"/>
lowerCamelCase : Dict = cls._get_magic_number_max_length()
lowerCamelCase : Optional[Any] = cls._read_magic_number(__magic_name__ , __magic_name__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__magic_name__ , magic_number=__magic_name__ ):
return extractor_format
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = "deprecated" , ):
os.makedirs(os.path.dirname(__magic_name__ ) , exist_ok=__magic_name__ )
# Prevent parallel extractions
lowerCamelCase : Tuple = str(Path(__magic_name__ ).with_suffix(""".lock""" ) )
with FileLock(__magic_name__ ):
shutil.rmtree(__magic_name__ , ignore_errors=__magic_name__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__magic_name__ , __magic_name__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=__magic_name__ , )
lowerCamelCase : int = extractor if extractor != """deprecated""" else extractor_format
else:
lowerCamelCase : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(__magic_name__ , __magic_name__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=__magic_name__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(__magic_name__ ):
return extractor.extract(__magic_name__ , __magic_name__ )
| 287 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
A__ : str = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
A__ : Optional[int] = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    """simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 185 |
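# Checking the metric by hand: binary F1 is the harmonic mean of precision and
# recall, F1 = 2 * P * R / (P + R). This reproduces the docstring's Example 1.
from sklearn.metrics import f1_score

refs = [0, 1, 0, 1, 0]
preds = [0, 0, 1, 1, 0]
tp = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 1)  # true positives: 1
fp = sum(1 for r, p in zip(refs, preds) if r == 0 and p == 1)  # false positives: 1
fn = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 0)  # false negatives: 1
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
assert f1 == f1_score(refs, preds) == 0.5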
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset, **kwargs):
    # deliberately shadows the builtin: the benchmark times `dataset.map`
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
        def tokenize(examples):
            return tokenizer(examples["text"])
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 287 | 0 |
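# `get_duration` comes from a local `utils` module that is not shown here. A
# plausible minimal implementation (an assumption, not the benchmark's actual
# code) wraps the call with a wall-clock timer:
import time
from functools import wraps

def get_duration(func):
    """Return the wall-clock seconds a call took instead of its result."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper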
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('''**Test `accumulate` gradient accumulation with dataloader break**''')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('''**Test NOOP `no_sync` context manager**''')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('''**Test Distributed `no_sync` context manager**''')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation, ''', f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('''<''', '''2.0''') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''', '''`split_batches=False`, `dispatch_batches=False`**''', )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''', f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 23 |
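# Outside the test harness, the accumulation pattern being verified above is
# just this in plain PyTorch (sizes and the 2-step factor are illustrative):
import torch
from torch import nn

model = nn.Linear(4, 1)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
accum_steps = 2  # take an optimizer step every 2 micro-batches

for i in range(8):
    x, y = torch.randn(16, 4), torch.randn(16, 1)
    loss = nn.functional.mse_loss(model(x), y)
    (loss / accum_steps).backward()  # scale so summed grads match one big batch
    if (i + 1) % accum_steps == 0:
        opt.step()
        opt.zero_grad()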
def lucas_lehmer_test(p):
    if p < 2:
        raise ValueError("""p should not be less than 2!""")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 287 | 0 |
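# Exercising the test: among small odd primes, exactly the Mersenne prime
# exponents should pass (2^11 - 1 = 2047 = 23 * 89 fails, for instance).
candidates = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
print([p for p in candidates if lucas_lehmer_test(p)])  # [3, 5, 7, 13, 17, 19, 31]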
from __future__ import annotations
class XORCipher:
    """simple docstring"""
    def __init__(self, key=0):
        # private fallback key used by the methods below when key is falsy
        self.__key = key
    def encrypt(self, content, key):
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def decrypt(self, content, key):
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string(self, content, key=0):
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def decrypt_string(self, content, key=0):
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file(self, file, key=0):
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True
    def decrypt_file(self, file, key):
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 248 |
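# The cipher round-trips because XOR is self-inverse: (x ^ k) ^ k == x.
cipher = XORCipher(key=67)
message = "hallo welt"
assert cipher.decrypt_string(cipher.encrypt_string(message), key=67) == message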
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowerCamelCase =logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""", FutureWarning, )
        super().__init__(*args, **kwargs)
| 287 | 0 |
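# The shim above is the usual pattern for renaming a public class without
# breaking old imports. In miniature (all names here are invented):
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept so existing imports keep working."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)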
"""simple docstring"""
from __future__ import annotations
def find_max(nums, left, right):
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 78 |
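# The recurrence T(n) = 2T(n/2) + O(1) solves to O(n) comparisons, the same as
# a linear scan, so the recursion is mainly didactic. A quick sanity check:
import random

data = [random.randint(-100, 100) for _ in range(50)]
assert find_max(data, 0, len(data) - 1) == max(data)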
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = """segformer"""
    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ])
    @property
    def atol_for_validation(self):
        return 1e-4
    @property
    def default_onnx_opset(self):
        return 12
| 287 | 0 |
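# The recipe above generalizes: subclass PretrainedConfig, set `model_type`,
# and store hyperparameters as attributes. A minimal sketch with an invented
# "toy" model type (not a real checkpoint):
from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size=64, num_layers=2, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers

config = ToyConfig(num_layers=4)
config.save_pretrained("./toy-config")  # writes ./toy-config/config.json
assert ToyConfig.from_pretrained("./toy-config").num_layers == 4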
import os
def solution():
    """simple docstring"""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace("\"", "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 231 |
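# A worked instance from the problem statement: COLIN scores 3+15+12+9+14 = 53,
# and at position 938 in the sorted list it contributes 938 * 53 = 49714.
name = "COLIN"
score = sum(ord(letter) - 64 for letter in name)  # ord("A") - 64 == 1
assert score == 53 and 938 * score == 49714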
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = """gpt_neo"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                F'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute block length and number of blocks with ONNX-exportable ops."""
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self):
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_attention_heads(self):
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch
                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self):
        return 13
| 287 | 0 |
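# `custom_unfold` mirrors torch.Tensor.unfold (sliding windows along one
# dimension) using only ONNX-exportable ops; it can be checked against the
# built-in directly:
import torch

x = torch.arange(10)
windows = x.unfold(0, 3, 2)  # size-3 windows every 2 elements
assert windows.tolist() == [[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]]
assert torch.equal(custom_unfold(x, 0, 3, 2), windows)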
'''simple docstring'''
from __future__ import annotations
class Matrix:
    '''simple docstring'''
    def __init__(self, rows):
        error = TypeError(
            '''Matrices must be formed from a list of zero or more lists containing at '''
            '''least one and the same number of values, each of which must be of type '''
            '''int or float.''')
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    # MATRIX INFORMATION
    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self):
        return len(self.rows)
    @property
    def num_columns(self):
        return len(self.rows[0])
    @property
    def order(self):
        return (self.num_rows, self.num_columns)
    @property
    def is_square(self):
        return self.order[0] == self.order[1]
    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))
    def is_invertable(self):
        return bool(self.determinant())
    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])
    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])
    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('''Only matrices with a non-zero determinant have an inverse''')
        return self.adjugate() * (1 / determinant)
    def __repr__(self):
        return str(self.rows)
    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '''[''' + '''. '''.join([str(value) for value in row]) + '''.]'''
                    for row in self.rows
                ])
            + "]"
        )
    # MATRIX MANIPULATION
    def add_row(self, row, position=None):
        type_error = TypeError('''Row must be a list containing all ints and/or floats''')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                '''Row must be equal in length to the other rows in the matrix''')
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column, position=None):
        type_error = TypeError(
            '''Column must be a list containing all ints and/or floats''')
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                '''Column must be equal in length to the other columns in the matrix''')
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    # MATRIX OPERATIONS
    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self, other):
        return not self == other
    def __neg__(self):
        return self * -1
    def __add__(self, other):
        if self.order != other.order:
            raise ValueError('''Addition requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])
    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError('''Subtraction requires matrices of the same order''')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])
    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    '''The number of columns in the first matrix must '''
                    '''be equal to the number of rows in the second''')
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                '''A Matrix can only be multiplied by an int, float, or another matrix''')
    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError('''A Matrix can only be raised to the power of an int''')
        if not self.is_square:
            raise ValueError('''Only square matrices can be raised to a power''')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                '''Only invertable matrices can be raised to a negative power''')
        result = self
        for _ in range(other - 1):
            result *= self
        return result
    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 198 |
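# A short round-trip through the class: [[2, 1], [1, 1]] has determinant 1,
# its inverse is [[1, -1], [-1, 2]], and their product is the identity.
m = Matrix([[2, 1], [1, 1]])
assert m.determinant() == 1
inv = m.inverse()
assert inv.rows == [[1, -1], [-1, 2]]
assert (m * inv).rows == m.identity().rows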
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 287 | 0 |
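# The module above keeps a deprecated import path alive purely by re-exporting
# from the new location. The same trick in miniature (module names invented):
#
#   # mylib/old_utils.py -- deprecated path kept for backward compatibility
#   from mylib.new_utils import helper, OtherThing  # noqa: F401
#
# Any code doing `from mylib.old_utils import helper` keeps working unchanged.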
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    """simple docstring"""
    model_type = """van"""
    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 40 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""", """2.0""") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 287 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase = None ) -> Tuple:
if components is None:
_a = []
_a = list(__UpperCAmelCase )
def __len__( self ) -> Union[str, Any]:
return len(self.__components )
def __str__( self ) -> List[str]:
return "(" + ",".join(map(__UpperCAmelCase , self.__components ) ) + ")"
def __add__( self , __UpperCAmelCase ) -> List[Any]:
_a = len(self )
if size == len(__UpperCAmelCase ):
_a = [self.__components[i] + other.component(__UpperCAmelCase ) for i in range(__UpperCAmelCase )]
return Vector(__UpperCAmelCase )
else:
raise Exception('''must have the same size''' )
def __sub__( self , __UpperCAmelCase ) -> Union[str, Any]:
_a = len(self )
if size == len(__UpperCAmelCase ):
_a = [self.__components[i] - other.component(__UpperCAmelCase ) for i in range(__UpperCAmelCase )]
return Vector(__UpperCAmelCase )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self , __UpperCAmelCase ) -> List[Any]:
...
@overload
def __mul__( self , __UpperCAmelCase ) -> str:
...
def __mul__( self , __UpperCAmelCase ) -> Union[str, Any]:
if isinstance(__UpperCAmelCase , (float, int) ):
_a = [c * other for c in self.__components]
return Vector(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(self ) == len(__UpperCAmelCase ):
_a = len(self )
_a = [self.__components[i] * other.component(__UpperCAmelCase ) for i in range(__UpperCAmelCase )]
return sum(__UpperCAmelCase )
else: # error case
raise Exception('''invalid operand!''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
return Vector(self.__components )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Any:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
assert -len(self.__components ) <= pos < len(self.__components )
_a = value
def _UpperCAmelCase ( self ) -> Union[str, Any]:
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
_a = [c**2 for c in self.__components]
return math.sqrt(sum(__UpperCAmelCase ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ) -> Optional[int]:
_a = self * other
_a = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
return Vector([0] * dimension )
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) and (isinstance(_lowerCAmelCase, _lowerCAmelCase ))
_a = [0] * dimension
_a = 1
return Vector(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
assert (
isinstance(_lowerCAmelCase, _lowerCAmelCase )
and isinstance(_lowerCAmelCase, _lowerCAmelCase )
and (isinstance(_lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Tuple, _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
_a = [random.randint(_lowerCAmelCase, _lowerCAmelCase ) for _ in range(_lowerCAmelCase )]
return Vector(_lowerCAmelCase )
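# Usage sketch for the vector class above. Its name was mangled in this dump;
# we assume it is bound to ``Vector``, as the helper functions here reference.
def _vector_usage_sketch():
    v = Vector([1.0, 2.0, 3.0])
    w = Vector([4.0, 5.0, 6.0])
    assert str(v + w) == "(5.0,7.0,9.0)"  # component-wise addition
    assert v * w == 32.0  # dot product via __mul__
    assert abs(v.euclidean_length() - 14 ** 0.5) < 1e-12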
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
_a = matrix
_a = w
_a = h
def __str__( self ) -> Union[str, Any]:
_a = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , __UpperCAmelCase ) -> List[Any]:
if self.__width == other.width() and self.__height == other.height():
_a = []
for i in range(self.__height ):
_a = [
self.__matrix[i][j] + other.component(__UpperCAmelCase , __UpperCAmelCase )
for j in range(self.__width )
]
matrix.append(__UpperCAmelCase )
return Matrix(__UpperCAmelCase , self.__width , self.__height )
else:
            raise Exception('''matrices must have the same dimension!''' )
def __sub__( self , __UpperCAmelCase ) -> List[Any]:
if self.__width == other.width() and self.__height == other.height():
_a = []
for i in range(self.__height ):
_a = [
self.__matrix[i][j] - other.component(__UpperCAmelCase , __UpperCAmelCase )
for j in range(self.__width )
]
matrix.append(__UpperCAmelCase )
return Matrix(__UpperCAmelCase , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self , __UpperCAmelCase ) -> Dict:
...
@overload
def __mul__( self , __UpperCAmelCase ) -> int:
...
def __mul__( self , __UpperCAmelCase ) -> Optional[Any]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ): # matrix-vector
if len(__UpperCAmelCase ) == self.__width:
_a = zero_vector(self.__height )
for i in range(self.__height ):
_a = [
self.__matrix[i][j] * other.component(__UpperCAmelCase )
for j in range(self.__width )
]
ans.change_component(__UpperCAmelCase , sum(__UpperCAmelCase ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(__UpperCAmelCase , (int, float) ): # matrix-scalar
_a = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__UpperCAmelCase , self.__width , self.__height )
return None
def _UpperCAmelCase ( self ) -> Tuple:
return self.__height
def _UpperCAmelCase ( self ) -> str:
return self.__width
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('''component: indices out of bounds''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if 0 <= x < self.__height and 0 <= y < self.__width:
_a = value
else:
raise Exception('''change_component: indices out of bounds''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
_a = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__UpperCAmelCase ) ):
_a = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__UpperCAmelCase , self.__width - 1 , self.__height - 1 ).determinant()
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__UpperCAmelCase , __UpperCAmelCase )
else:
raise Exception('''Indices out of bounds''' )
def _UpperCAmelCase ( self ) -> str:
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_a = [
self.__matrix[0][y] * self.cofactor(0 , __UpperCAmelCase ) for y in range(self.__width )
]
return sum(__UpperCAmelCase )
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_a = [[0] * n for _ in range(_lowerCAmelCase )]
return Matrix(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[int], _lowerCAmelCase : Any ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
_a = [
[random.randint(_lowerCAmelCase, _lowerCAmelCase ) for _ in range(_lowerCAmelCase )] for _ in range(_lowerCAmelCase )
]
return Matrix(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) | 320 |
from scipy.stats import pearsonr
import datasets
_lowerCamelCase ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_lowerCamelCase ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_lowerCamelCase ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
if return_pvalue:
lowerCamelCase : Optional[Any] = pearsonr(__magic_name__ , __magic_name__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(__magic_name__ , __magic_name__ )[0] )}
| 287 | 0 |
'''simple docstring'''
import math
def __snake_case( _lowerCAmelCase ) -> Tuple:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
snake_case__ : int = range(3 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase=1 , **_lowerCAmelCase ) -> Dict:
snake_case__ : int = factor * value
snake_case__ : str = value
while not is_prime(_lowerCAmelCase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowerCAmelCase )
return value
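# Sanity sketch for the helpers above. The original names ``is_prime`` and
# ``next_prime`` are assumed here, since the bodies above still call them.
def _prime_helpers_sketch():
    assert is_prime(13) and not is_prime(15)
    assert next_prime(14) == 17  # 14 -> 15 -> 16 -> 17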
| 35 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """conditional_detr"""
_UpperCAmelCase : Optional[int] = ["""past_key_values"""]
_UpperCAmelCase : Optional[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , __magic_name__=True , __magic_name__=None , __magic_name__=3 , __magic_name__=3_0_0 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=6 , __magic_name__=2_0_4_8 , __magic_name__=8 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=True , __magic_name__="relu" , __magic_name__=2_5_6 , __magic_name__=0.1 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=False , __magic_name__="sine" , __magic_name__="resnet50" , __magic_name__=True , __magic_name__=False , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=1 , __magic_name__=1 , __magic_name__=2 , __magic_name__=5 , __magic_name__=2 , __magic_name__=0.25 , **__magic_name__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase : Optional[int] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase : Dict = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : str = config_class.from_dict(__magic_name__ )
lowerCamelCase : Dict = use_timm_backbone
lowerCamelCase : str = backbone_config
lowerCamelCase : Tuple = num_channels
lowerCamelCase : Dict = num_queries
lowerCamelCase : Any = d_model
lowerCamelCase : Optional[Any] = encoder_ffn_dim
lowerCamelCase : List[str] = encoder_layers
lowerCamelCase : Union[str, Any] = encoder_attention_heads
lowerCamelCase : Any = decoder_ffn_dim
lowerCamelCase : Dict = decoder_layers
lowerCamelCase : Union[str, Any] = decoder_attention_heads
lowerCamelCase : Dict = dropout
lowerCamelCase : List[str] = attention_dropout
lowerCamelCase : Union[str, Any] = activation_dropout
lowerCamelCase : Optional[int] = activation_function
lowerCamelCase : int = init_std
lowerCamelCase : str = init_xavier_std
lowerCamelCase : Tuple = encoder_layerdrop
lowerCamelCase : str = decoder_layerdrop
lowerCamelCase : Tuple = encoder_layers
lowerCamelCase : Optional[int] = auxiliary_loss
lowerCamelCase : Optional[Any] = position_embedding_type
lowerCamelCase : Optional[int] = backbone
lowerCamelCase : Union[str, Any] = use_pretrained_backbone
lowerCamelCase : str = dilation
# Hungarian matcher
lowerCamelCase : Optional[Any] = class_cost
lowerCamelCase : Dict = bbox_cost
lowerCamelCase : Tuple = giou_cost
# Loss coefficients
lowerCamelCase : Union[str, Any] = mask_loss_coefficient
lowerCamelCase : Dict = dice_loss_coefficient
lowerCamelCase : Optional[int] = cls_loss_coefficient
lowerCamelCase : Optional[int] = bbox_loss_coefficient
lowerCamelCase : Optional[int] = giou_loss_coefficient
lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def UpperCamelCase__ ( self ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
return self.d_model
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase : Optional[int] = self.backbone_config.to_dict()
lowerCamelCase : Optional[Any] = self.__class__.model_type
return output
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = version.parse("""1.11""")
@property
def UpperCamelCase__ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def UpperCamelCase__ ( self ):
return 1e-5
@property
def UpperCamelCase__ ( self ):
return 1_2
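# Minimal construction sketch, assuming the classes above correspond to
# ``ConditionalDetrConfig`` / ``ConditionalDetrOnnxConfig`` in transformers:
def _conditional_detr_config_sketch():
    from transformers import ConditionalDetrConfig
    config = ConditionalDetrConfig(num_queries=100, d_model=256)
    # ``num_attention_heads`` is an attribute-map alias for ``encoder_attention_heads``
    assert config.num_attention_heads == config.encoder_attention_heads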
| 287 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
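# With ``_LazyModule`` wired up this way, submodules are imported only on first
# attribute access. Hypothetical usage, assuming transformers exports the names:
def _mgp_str_lazy_import_sketch():
    from transformers import MgpstrConfig  # resolved lazily on access
    config = MgpstrConfig()
    assert config.model_type == "mgp-str"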
| 296 |
import json
import sys
def _a ( lowerCamelCase, lowerCamelCase ):
with open(lowerCamelCase, encoding="""utf-8""" ) as f:
lowerCamelCase : List[Any] = json.load(lowerCamelCase )
lowerCamelCase : Optional[Any] = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = results[benchmark_name]
lowerCamelCase : Union[str, Any] = benchmark_name.split("""/""" )[-1]
output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
lowerCamelCase : Any = """| metric |"""
lowerCamelCase : str = """|--------|"""
lowerCamelCase : List[Any] = """| new / old (diff) |"""
for metric_name in sorted(lowerCamelCase ):
lowerCamelCase : List[Any] = benchmark_res[metric_name]
lowerCamelCase : Tuple = metric_vals["""new"""]
lowerCamelCase : int = metric_vals.get("""old""", lowerCamelCase )
lowerCamelCase : Dict = metric_vals.get("""diff""", lowerCamelCase )
lowerCamelCase : Dict = F''' {new_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else """None"""
if old_val is not None:
val_str += F''' / {old_val:f}''' if isinstance(lowerCamelCase, (int, float) ) else "None"
if dif_val is not None:
val_str += F''' ({dif_val:f})''' if isinstance(lowerCamelCase, (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCamelCase, """w""", encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCamelCase ) )
if __name__ == "__main__":
_lowerCamelCase =sys.argv[1]
_lowerCamelCase =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
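# Example of the input shape the formatter expects, assuming benchmark results
# of the form {benchmark_name: {metric_name: {"new": ..., "old": ..., "diff": ...}}}
# and the original function name used in the __main__ block above:
def _format_json_to_md_example():
    import json
    results = {"benchmarks/benchmark_map.json": {"time_sec": {"new": 1.23, "old": 1.50, "diff": -0.27}}}
    with open("bench.json", "w", encoding="utf-8") as f:
        json.dump(results, f)
    format_json_to_md("bench.json", "bench.md")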
| 287 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case__ :
def __init__( self : Optional[Any] , _A : Dict , _A : Union[str, Any]=13 , _A : List[Any]=7 , _A : Dict=True , _A : str=True , _A : str=True , _A : Tuple=True , _A : str=99 , _A : Dict=32 , _A : List[str]=2 , _A : Tuple=4 , _A : Union[str, Any]=37 , _A : Any="gelu" , _A : Union[str, Any]=0.1 , _A : int=0.1 , _A : Optional[Any]=5_12 , _A : str=16 , _A : Union[str, Any]=2 , _A : str=0.02 , _A : List[Any]=3 , _A : Tuple=4 , _A : int=None , _A : List[str]=10_00 , ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : List[Any] = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : Union[str, Any] = use_input_mask
UpperCAmelCase_ : str = use_token_type_ids
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : List[str] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : Dict = type_sequence_label_size
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : Dict = num_choices
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : Optional[int] = range_bbox
def A ( self : Any ) -> Any:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase_ : Dict = bbox[i, j, 3]
UpperCAmelCase_ : Any = bbox[i, j, 1]
UpperCAmelCase_ : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase_ : Union[str, Any] = bbox[i, j, 2]
UpperCAmelCase_ : Tuple = bbox[i, j, 0]
UpperCAmelCase_ : Optional[Any] = t
UpperCAmelCase_ : str = tf.convert_to_tensor(_A )
UpperCAmelCase_ : int = None
if self.use_input_mask:
UpperCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : int = None
if self.use_labels:
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : str = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] , _A : str , _A : str , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[Any] ) -> Tuple:
UpperCAmelCase_ : int = TFLayoutLMModel(config=_A )
UpperCAmelCase_ : Optional[int] = model(_A , _A , attention_mask=_A , token_type_ids=_A )
UpperCAmelCase_ : Tuple = model(_A , _A , token_type_ids=_A )
UpperCAmelCase_ : Any = model(_A , _A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : List[Any] , _A : str , _A : Any , _A : Optional[int] , _A : Any , _A : Tuple , _A : List[Any] , _A : int , _A : Tuple ) -> Dict:
UpperCAmelCase_ : Dict = TFLayoutLMForMaskedLM(config=_A )
UpperCAmelCase_ : Tuple = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Dict , _A : Optional[int] , _A : Tuple , _A : List[str] , _A : Dict , _A : Optional[Any] , _A : List[str] , _A : Any , _A : int ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.num_labels
UpperCAmelCase_ : List[str] = TFLayoutLMForSequenceClassification(config=_A )
UpperCAmelCase_ : Dict = model(_A , _A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , _A : List[str] , _A : Union[str, Any] , _A : Dict , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Dict ) -> Any:
UpperCAmelCase_ : Any = self.num_labels
UpperCAmelCase_ : Dict = TFLayoutLMForTokenClassification(config=_A )
UpperCAmelCase_ : Optional[Any] = model(_A , _A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : int , _A : Optional[Any] , _A : List[str] , _A : List[str] , _A : str , _A : int , _A : int , _A : Tuple , _A : List[Any] ) -> List[Any]:
UpperCAmelCase_ : List[str] = TFLayoutLMForQuestionAnswering(config=_A )
UpperCAmelCase_ : Union[str, Any] = model(_A , _A , attention_mask=_A , token_type_ids=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : int ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
        # config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = config_and_inputs
UpperCAmelCase_ : Any = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
a_ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
a_ = (
{
"""feature-extraction""": TFLayoutLMModel,
"""fill-mask""": TFLayoutLMForMaskedLM,
"""text-classification""": TFLayoutLMForSequenceClassification,
"""token-classification""": TFLayoutLMForTokenClassification,
"""zero-shot""": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = True
a_ = 10
def A ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = TFLayoutLMModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self , config_class=_A , hidden_size=37 )
def A ( self : int ) -> Tuple:
self.config_tester.run_common_tests()
def A ( self : int ) -> Tuple:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def A ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def A ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def A ( self : List[Any] ) -> List[Any]:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : str = TFLayoutLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''Onnx compliancy broke with TF 2.10''' )
def A ( self : List[str] ) -> str:
pass
def __UpperCAmelCase ( ) -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
UpperCAmelCase_ : int = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] ) # noqa: E231
UpperCAmelCase_ : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
UpperCAmelCase_ : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] ) # noqa: E231
UpperCAmelCase_ : Optional[int] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
UpperCAmelCase_ : str = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case__ ( unittest.TestCase):
@slow
def A ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' )
UpperCAmelCase_ : Union[str, Any] = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : List[str] = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
# test the sequence output on [0, :3, :3]
UpperCAmelCase_ : List[str] = tf.convert_to_tensor(
[[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-3 ) )
# test the pooled output on [1, :3]
UpperCAmelCase_ : List[Any] = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _A , atol=1e-3 ) )
@slow
def A ( self : Dict ) -> Dict:
# initialize model with randomly initialized sequence classification head
UpperCAmelCase_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 )
UpperCAmelCase_ : List[str] = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : Union[str, Any] = model(
input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
UpperCAmelCase_ : List[str] = outputs.loss
UpperCAmelCase_ : Dict = (2,)
self.assertEqual(loss.shape , _A )
# test the shape of the logits
UpperCAmelCase_ : int = outputs.logits
UpperCAmelCase_ : str = (2, 2)
self.assertEqual(logits.shape , _A )
@slow
def A ( self : Any ) -> Optional[int]:
# initialize model with randomly initialized token classification head
UpperCAmelCase_ : int = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 )
UpperCAmelCase_ : Tuple = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : int = model(
input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A , labels=_A )
# test the shape of the logits
UpperCAmelCase_ : Optional[int] = outputs.logits
UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , _A )
@slow
def A ( self : str ) -> Optional[int]:
# initialize model with randomly initialized token classification head
UpperCAmelCase_ : Optional[Any] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' )
UpperCAmelCase_ : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase_ : Dict = model(input_ids=_A , bbox=_A , attention_mask=_A , token_type_ids=_A )
# test the shape of the logits
UpperCAmelCase_ : Dict = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , _A )
self.assertEqual(outputs.end_logits.shape , _A )
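# Quick shape check for the hand-built batch above; a sketch assuming TF is
# installed and the helper keeps its return order (input_ids, attention_mask,
# bbox, token_type_ids, labels):
def _layoutlm_batch_shapes_sketch():
    input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
    assert input_ids.shape == (2, 25)
    assert bbox.shape == (2, 25, 4)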
| 304 |
def _a ( lowerCamelCase ):
    """Reverse every word longer than 4 characters in a sentence.

    >>> _a("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        """""".join(word[::-1] ) if len(word ) > 4 else word for word in lowerCamelCase.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
| 287 | 0 |
'''simple docstring'''
import pytest
A__ : Tuple = """__dummy_dataset1__"""
A__ : Optional[Any] = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def UpperCAmelCase__ ( ) -> int:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCAmelCase__ ( ) -> Optional[Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ) -> Any:
__lowerCamelCase : Union[str, Any] = dataset_loading_script_name
__lowerCamelCase : List[Any] = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=UpperCAmelCase_ )
__lowerCamelCase : Dict = script_dir / F'{script_name}.py'
with open(UpperCAmelCase_ , 'w' ) as f:
f.write(UpperCAmelCase_ )
return str(UpperCAmelCase_ )
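# Consumption sketch, assuming the final fixture above is registered under the
# name ``dataset_loading_script_dir`` and returns an on-disk path:
def test_dummy_dataset_script_on_disk(dataset_loading_script_dir):
    import os
    assert os.path.exists(dataset_loading_script_dir)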
| 185 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def _a ( lowerCamelCase ):
if version.parse(lowerCamelCase ) < version.parse(lowerCamelCase ):
if "dev" in min_version:
lowerCamelCase : Optional[int] = (
"""This example requires a source install from HuggingFace Transformers (see """
"""`https://huggingface.co/docs/transformers/installation#install-from-source`),"""
)
else:
lowerCamelCase : int = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ """Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other """
"""versions of HuggingFace Transformers.""" )
| 287 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase__ = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCamelCase__ = Features({"""audio""": Audio()} )
lowerCamelCase__ = Features({"""labels""": ClassLabel} )
lowerCamelCase__ = "audio"
lowerCamelCase__ = "labels"
def A ( self : Optional[Any] , __snake_case : str ) -> List[str]:
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __snake_case ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
UpperCAmelCase : int = copy.deepcopy(self )
UpperCAmelCase : Tuple = self.label_schema.copy()
UpperCAmelCase : Optional[int] = features[self.label_column]
UpperCAmelCase : Dict = label_schema
return task_template
@property
def A ( self : Optional[int] ) -> int:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
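# Alignment sketch, assuming the template above is ``AudioClassification``
# from ``datasets.tasks`` (pre-3.0 layout):
def _audio_task_alignment_sketch():
    from datasets import Audio, ClassLabel, Features
    from datasets.tasks import AudioClassification
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
    task = AudioClassification().align_with_features(features)
    assert task.label_schema["labels"].names == ["cat", "dog"]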
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
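# Construction sketch, assuming the classes above mirror transformers'
# ``CamembertConfig`` / ``CamembertOnnxConfig``:
def _camembert_config_sketch():
    from transformers import CamembertConfig
    config = CamembertConfig(num_hidden_layers=6)
    assert config.hidden_size == 768  # the default from the signature above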
| 287 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__snake_case : int = get_logger(__name__)
class A__:
"""simple docstring"""
def __init__( self , _lowercase = None ) -> str:
a_ : Dict = (
os.path.join(_lowercase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
a_ : List[str] = Extractor
def UpperCamelCase__ ( self , _lowercase ) -> int:
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
a_ : int = os.path.abspath(_lowercase )
return os.path.join(self.extract_dir , hash_url_to_filename(_lowercase ) )
def UpperCamelCase__ ( self , _lowercase , _lowercase ) -> Optional[int]:
return force_extract or (
not os.path.isfile(_lowercase ) and not (os.path.isdir(_lowercase ) and os.listdir(_lowercase ))
)
def UpperCamelCase__ ( self , _lowercase , _lowercase = False ) -> Union[str, Any]:
a_ : Union[str, Any] = self.extractor.infer_extractor_format(_lowercase )
if not extractor_format:
return input_path
a_ : int = self._get_output_path(_lowercase )
if self._do_extract(_lowercase , _lowercase ):
self.extractor.extract(_lowercase , _lowercase , _lowercase )
return output_path
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@classmethod
@abstractmethod
def UpperCamelCase__ ( cls , _lowercase , **_lowercase ) -> Tuple:
...
@staticmethod
@abstractmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> List[str]:
...
class A__(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : List[bytes] = []
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> Dict:
with open(_lowercase , """rb""" ) as f:
return f.read(_lowercase )
@classmethod
def UpperCamelCase__ ( cls , _lowercase , _lowercase = b"" ) -> Union[str, Any]:
if not magic_number:
a_ : Optional[Any] = max(len(_lowercase ) for cls_magic_number in cls.magic_numbers )
try:
a_ : Tuple = cls.read_magic_number(_lowercase , _lowercase )
except OSError:
return False
return any(magic_number.startswith(_lowercase ) for cls_magic_number in cls.magic_numbers )
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls , _lowercase , **_lowercase ) -> Tuple:
return tarfile.is_tarfile(_lowercase )
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> Tuple:
def resolved(_lowercase ) -> str:
return os.path.realpath(os.path.abspath(_lowercase ) )
def badpath(_lowercase , _lowercase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_lowercase , _lowercase ) ).startswith(_lowercase )
def badlink(_lowercase , _lowercase ) -> bool:
# Links are interpreted relative to the directory containing the link
a_ : List[str] = resolved(os.path.join(_lowercase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_lowercase )
a_ : Optional[Any] = resolved(_lowercase )
for finfo in members:
if badpath(finfo.name , _lowercase ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(_lowercase , _lowercase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(_lowercase , _lowercase ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> Any:
os.makedirs(_lowercase , exist_ok=_lowercase )
a_ : Dict = tarfile.open(_lowercase )
tar_file.extractall(_lowercase , members=TarExtractor.safemembers(_lowercase , _lowercase ) )
tar_file.close()
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : str = [B"""\x1F\x8B"""]
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> Optional[int]:
with gzip.open(_lowercase , """rb""" ) as gzip_file:
with open(_lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Optional[int] = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def UpperCamelCase__ ( cls , _lowercase , _lowercase = b"" ) -> List[str]:
if super().is_extractable(_lowercase , magic_number=_lowercase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_lowercase , """rb""" ) as fp:
a_ : List[str] = _EndRecData(_lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
a_ : List[Any] = fp.read(_lowercase ) # CD is where we expect it to be
if len(_lowercase ) == sizeCentralDir:
a_ : str = struct.unpack(_lowercase , _lowercase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> int:
os.makedirs(_lowercase , exist_ok=_lowercase )
with zipfile.ZipFile(_lowercase , """r""" ) as zip_file:
zip_file.extractall(_lowercase )
zip_file.close()
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : List[str] = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> Optional[int]:
with lzma.open(_lowercase ) as compressed_file:
with open(_lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Any = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> List[Any]:
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(_lowercase , exist_ok=_lowercase )
a_ : Union[str, Any] = rarfile.RarFile(_lowercase )
rf.extractall(_lowercase )
rf.close()
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Tuple = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> List[str]:
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
a_ : int = zstd.ZstdDecompressor()
with open(_lowercase , """rb""" ) as ifh, open(_lowercase , """wb""" ) as ofh:
dctx.copy_stream(_lowercase , _lowercase )
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : Any = [B"""\x42\x5A\x68"""]
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> int:
with bza.open(_lowercase , """rb""" ) as compressed_file:
with open(_lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : List[Any] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> Dict:
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(_lowercase , exist_ok=_lowercase )
with pyazr.SevenZipFile(_lowercase , """r""" ) as archive:
archive.extractall(_lowercase )
class A__(__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : List[Any] = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def UpperCamelCase__ ( _lowercase , _lowercase ) -> List[str]:
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(_lowercase , """rb""" ) as compressed_file:
with open(_lowercase , """wb""" ) as extracted_file:
shutil.copyfileobj(_lowercase , _lowercase )
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path, magic_number_length):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path, return_extractor=False):
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path, output_path, extractor_format=None, extractor="deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
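
# Usage sketch (added for illustration; not part of the original module). Assuming the
# extractors above are registered as shown, `infer_extractor_format` sniffs the archive's
# magic number and `extract` dispatches to the matching extractor. The paths below are
# hypothetical placeholders.
def _example_extract_archive(archive_path="/tmp/archive.zip", output_dir="/tmp/extracted/archive"):
    extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "zip"
    if extractor_format is not None:
        Extractor.extract(archive_path, output_dir, extractor_format=extractor_format)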
| 248 |
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
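
# Illustrative sanity checks (added; the values follow from the table above):
# 1 kWh equals 3.6e6 J, and converting between identical units is the identity.
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert energy_conversion("joule", "joule", 42.0) == 42.0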
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower()
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower()
        )

    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default"
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no."
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field("How many machines do you want use? [1]: ", int, default=1)

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
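
# Illustrative result (added for clarity): the interactive flow above ultimately builds a
# SageMakerConfig like the one below. Every field value here is a hypothetical example,
# not a recommended default.
def _example_sagemaker_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.DATA_PARALLEL,  # hypothetical choice
        use_cpu=False,
        dynamo_config={},
        ec2_instance_type="ml.p3.2xlarge",  # hypothetical instance type
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="fp16",
        num_machines=2,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )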
| 78 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema  # bypass the frozen dataclass
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
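
# Usage sketch (added; the column names are hypothetical): mapping a dataset whose audio
# lives in a "path" column and whose transcripts live in a "sentence" column.
def _example_column_mapping():
    task = AutomaticSpeechRecognition(audio_column="path", transcription_column="sentence")
    return task.column_mapping  # {"path": "audio", "sentence": "transcription"}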
| 287 | 0 |